diff --git a/libritts-r/log/log-train-2024-08-06-08-02-16-0 b/libritts-r/log/log-train-2024-08-06-08-02-16-0 new file mode 100644 index 0000000000000000000000000000000000000000..3cb78779f787ef1a843ad1af14071ffbee95cec7 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-0 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,439 INFO [trainer.py:870] (0/8) Training started +2024-08-06 08:02:16,443 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 08:02:16,444 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,444 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 08:02:17,494 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,318 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 08:02:20,442 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,071 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 08:02:21,071 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 08:02:21,396 INFO [datamodule.py:388] (0/8) About to create dev dataloader diff --git 
a/libritts-r/log/log-train-2024-08-06-08-02-16-1 b/libritts-r/log/log-train-2024-08-06-08-02-16-1 new file mode 100644 index 0000000000000000000000000000000000000000..2d2e5cc10001f05cf6fd9afb0f61f959eb750b63 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-1 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,470 INFO [trainer.py:870] (1/8) Training started +2024-08-06 08:02:16,471 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 08:02:16,471 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,471 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 08:02:17,214 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 08:02:17,933 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 08:02:20,444 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,071 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 08:02:21,071 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 08:02:21,393 INFO [datamodule.py:388] (1/8) About to create dev dataloader diff --git 
a/libritts-r/log/log-train-2024-08-06-08-02-16-2 b/libritts-r/log/log-train-2024-08-06-08-02-16-2 new file mode 100644 index 0000000000000000000000000000000000000000..b6d52c20900232a8cba5918bd30c780727cb8f5e --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-2 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,456 INFO [trainer.py:870] (2/8) Training started +2024-08-06 08:02:16,457 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 08:02:16,457 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,458 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 08:02:17,414 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,019 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 08:02:20,445 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,076 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 08:02:21,076 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 08:02:21,400 INFO [datamodule.py:388] (2/8) About to create dev dataloader diff --git 
a/libritts-r/log/log-train-2024-08-06-08-02-16-3 b/libritts-r/log/log-train-2024-08-06-08-02-16-3 new file mode 100644 index 0000000000000000000000000000000000000000..cb87d1d143c234ad6cff0ed2518daa6d8014a8b0 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-3 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,435 INFO [trainer.py:870] (3/8) Training started +2024-08-06 08:02:16,436 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 08:02:16,436 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,436 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 08:02:17,492 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,433 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 08:02:20,445 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,078 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 08:02:21,078 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 08:02:21,410 INFO [datamodule.py:388] (3/8) About to create dev dataloader diff --git 
a/libritts-r/log/log-train-2024-08-06-08-02-16-4 b/libritts-r/log/log-train-2024-08-06-08-02-16-4 new file mode 100644 index 0000000000000000000000000000000000000000..f6ccbc88f914aac84cf562c1c952cc3e3299db13 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-4 @@ -0,0 +1,15 @@ +2024-08-06 08:02:16,446 INFO [trainer.py:870] (4/8) Training started +2024-08-06 08:02:16,447 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 08:02:16,447 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,447 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 08:02:17,470 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,312 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 08:02:20,442 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 08:02:20,456 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 08:02:20,463 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,079 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 08:02:21,080 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 08:02:21,408 INFO [datamodule.py:388] (4/8) About to create dev dataloader +2024-08-06 08:02:39,869 INFO 
[trainer.py:1092] (4/8) Saving batch to exp/valle/batch-bdd640fb-0667-1ad1-1c80-317fa3b1799d.pt diff --git a/libritts-r/log/log-train-2024-08-06-08-02-16-5 b/libritts-r/log/log-train-2024-08-06-08-02-16-5 new file mode 100644 index 0000000000000000000000000000000000000000..b6b3fb32f6275fe4cfb75981f1e5b0f8a9546c49 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-5 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,470 INFO [trainer.py:870] (5/8) Training started +2024-08-06 08:02:16,471 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 08:02:16,471 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,471 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 08:02:17,212 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 08:02:17,934 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 08:02:20,445 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,086 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 08:02:21,086 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 
08:02:21,424 INFO [datamodule.py:388] (5/8) About to create dev dataloader diff --git a/libritts-r/log/log-train-2024-08-06-08-02-16-6 b/libritts-r/log/log-train-2024-08-06-08-02-16-6 new file mode 100644 index 0000000000000000000000000000000000000000..dc603148cd05944fba64a16cc79a312390496910 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-6 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,468 INFO [trainer.py:870] (6/8) Training started +2024-08-06 08:02:16,469 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 08:02:16,469 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,469 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 08:02:17,495 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,427 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 08:02:20,443 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,075 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 08:02:21,076 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 08:02:21,402 INFO 
[datamodule.py:388] (6/8) About to create dev dataloader diff --git a/libritts-r/log/log-train-2024-08-06-08-02-16-7 b/libritts-r/log/log-train-2024-08-06-08-02-16-7 new file mode 100644 index 0000000000000000000000000000000000000000..6483af1f5d4f590e585e606689041285ff767fc1 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-02-16-7 @@ -0,0 +1,14 @@ +2024-08-06 08:02:16,467 INFO [trainer.py:870] (7/8) Training started +2024-08-06 08:02:16,468 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 08:02:16,469 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:02:16,469 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 08:02:17,493 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 08:02:18,432 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 08:02:20,442 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 08:02:20,455 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 08:02:20,462 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 08:02:20,462 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 08:02:20,464 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 08:02:21,073 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 08:02:21,074 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 08:02:21,402 INFO [datamodule.py:388] 
(7/8) About to create dev dataloader diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-0 b/libritts-r/log/log-train-2024-08-06-08-03-57-0 new file mode 100644 index 0000000000000000000000000000000000000000..56af22cef9d2c3907447de5f4a798d94465b2561 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-0 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,516 INFO [trainer.py:870] (0/8) Training started +2024-08-06 08:03:57,521 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 08:03:57,521 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,521 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 08:03:58,244 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,498 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 08:04:02,291 INFO [datamodule.py:427] (0/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-1 b/libritts-r/log/log-train-2024-08-06-08-03-57-1 new file mode 100644 index 0000000000000000000000000000000000000000..15d8446116bd47f30ed9fb17e738e012db92911c --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-1 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,570 INFO [trainer.py:870] (1/8) Training started +2024-08-06 08:03:57,571 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 08:03:57,571 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 
'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,571 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 08:03:58,276 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,916 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (1/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-2 b/libritts-r/log/log-train-2024-08-06-08-03-57-2 new file mode 100644 index 0000000000000000000000000000000000000000..8e4f131bb0d8de1b83980944f63a33676310f79c --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-2 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,568 INFO [trainer.py:870] (2/8) Training started +2024-08-06 08:03:57,569 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 08:03:57,569 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': 
'/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,569 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 08:03:58,294 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,503 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (2/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-3 b/libritts-r/log/log-train-2024-08-06-08-03-57-3 new file mode 100644 index 0000000000000000000000000000000000000000..1bdbfdae55ccd44dc8e46f645cf2d713970753c0 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-3 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,566 INFO [trainer.py:870] (3/8) Training started +2024-08-06 08:03:57,567 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 08:03:57,567 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 
'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,567 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 08:03:58,258 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,929 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (3/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-4 b/libritts-r/log/log-train-2024-08-06-08-03-57-4 new file mode 100644 index 0000000000000000000000000000000000000000..600409dde09ac5cb6841105977a6d0992b65ef38 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-4 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,571 INFO [trainer.py:870] (4/8) Training started +2024-08-06 08:03:57,572 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 08:03:57,572 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 
100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,573 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 08:03:58,346 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,592 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (4/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-5 b/libritts-r/log/log-train-2024-08-06-08-03-57-5 new file mode 100644 index 0000000000000000000000000000000000000000..68dca32329cf84e57acfa3d6bd6e0c49f06263cb --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-5 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,572 INFO [trainer.py:870] (5/8) Training started +2024-08-06 08:03:57,573 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 08:03:57,573 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,573 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 08:03:58,346 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,588 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (5/8) About to get train cuts diff --git 
a/libritts-r/log/log-train-2024-08-06-08-03-57-6 b/libritts-r/log/log-train-2024-08-06-08-03-57-6 new file mode 100644 index 0000000000000000000000000000000000000000..0cfa0894d541163e6f95f7fea31f62b10ea26ab3 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-6 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,572 INFO [trainer.py:870] (6/8) Training started +2024-08-06 08:03:57,573 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 08:03:57,573 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,573 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 08:03:58,337 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,524 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 08:04:02,293 INFO [datamodule.py:427] (6/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-03-57-7 b/libritts-r/log/log-train-2024-08-06-08-03-57-7 new file mode 100644 index 0000000000000000000000000000000000000000..18c6bd417824869d4eee4cfe11971f00caa3f1b0 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-03-57-7 @@ -0,0 +1,7 @@ +2024-08-06 08:03:57,571 INFO [trainer.py:870] (7/8) Training started +2024-08-06 08:03:57,572 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 08:03:57,572 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 
'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:03:57,572 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 08:03:58,300 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 08:03:59,957 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 08:04:02,295 INFO [datamodule.py:427] (7/8) About to get train cuts diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-0 b/libritts-r/log/log-train-2024-08-06-08-06-14-0 new file mode 100644 index 0000000000000000000000000000000000000000..d5a0a05f72d068e3d163f492fa34ab86b7a8366c --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-0 @@ -0,0 +1,357 @@ +2024-08-06 08:06:14,316 INFO [trainer.py:870] (0/8) Training started +2024-08-06 08:06:14,320 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 08:06:14,320 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 
'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,320 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 08:06:15,058 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,197 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 08:06:19,148 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 08:06:19,149 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 08:06:19,151 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 08:06:19,151 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 08:06:19,152 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,772 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 08:06:19,772 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 08:06:20,101 INFO [datamodule.py:388] (0/8) About to create dev dataloader +2024-08-06 08:08:02,122 INFO [trainer.py:765] (0/8) Epoch 1, batch 100, train_loss[loss=4.278, ArTop10Accuracy=0.5092, over 14148.00 frames. ], tot_loss[loss=5.044, ArTop10Accuracy=0.3756, over 4763.57 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,829 INFO [trainer.py:765] (0/8) Epoch 1, batch 200, train_loss[loss=4.012, ArTop10Accuracy=0.5509, over 13728.00 frames. ], tot_loss[loss=4.485, ArTop10Accuracy=0.4688, over 7742.94 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,430 INFO [trainer.py:765] (0/8) Epoch 1, batch 300, train_loss[loss=3.906, ArTop10Accuracy=0.5625, over 14160.00 frames. ], tot_loss[loss=4.212, ArTop10Accuracy=0.5139, over 9378.07 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,698 INFO [trainer.py:765] (0/8) Epoch 1, batch 400, train_loss[loss=3.687, ArTop10Accuracy=0.6066, over 10701.00 frames. ], tot_loss[loss=4.03, ArTop10Accuracy=0.5447, over 10279.72 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 08:13:40,049 INFO [trainer.py:765] (0/8) Epoch 1, batch 500, train_loss[loss=3.62, ArTop10Accuracy=0.6184, over 12078.00 frames. ], tot_loss[loss=3.882, ArTop10Accuracy=0.5706, over 10862.69 frames. 
], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,242 INFO [trainer.py:765] (0/8) Epoch 1, batch 600, train_loss[loss=3.472, ArTop10Accuracy=0.6423, over 11520.00 frames. ], tot_loss[loss=3.767, ArTop10Accuracy=0.5908, over 11393.62 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,423 INFO [trainer.py:765] (0/8) Epoch 1, batch 700, train_loss[loss=3.358, ArTop10Accuracy=0.672, over 10137.00 frames. ], tot_loss[loss=3.689, ArTop10Accuracy=0.6048, over 11532.89 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,016 INFO [trainer.py:765] (0/8) Epoch 1, batch 800, train_loss[loss=3.327, ArTop10Accuracy=0.6665, over 10185.00 frames. ], tot_loss[loss=3.625, ArTop10Accuracy=0.6163, over 11642.41 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 08:18:56,150 INFO [trainer.py:765] (0/8) Epoch 1, batch 900, train_loss[loss=3.441, ArTop10Accuracy=0.6465, over 12888.00 frames. ], tot_loss[loss=3.567, ArTop10Accuracy=0.6273, over 11696.36 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,861 INFO [trainer.py:765] (0/8) Epoch 1, batch 1000, train_loss[loss=3.339, ArTop10Accuracy=0.6726, over 13041.00 frames. ], tot_loss[loss=3.523, ArTop10Accuracy=0.6352, over 11871.89 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,538 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,154 INFO [trainer.py:765] (0/8) Epoch 1, batch 1100, train_loss[loss=3.447, ArTop10Accuracy=0.6526, over 13650.00 frames. ], tot_loss[loss=3.488, ArTop10Accuracy=0.6416, over 11935.79 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 08:22:45,413 INFO [trainer.py:765] (0/8) Epoch 1, batch 1200, train_loss[loss=3.399, ArTop10Accuracy=0.6582, over 12078.00 frames. ], tot_loss[loss=3.456, ArTop10Accuracy=0.6476, over 11841.59 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,268 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 08:23:45,272 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-1.pt +2024-08-06 08:25:36,238 INFO [trainer.py:765] (0/8) Epoch 2, batch 100, train_loss[loss=3.464, ArTop10Accuracy=0.6425, over 14586.00 frames. ], tot_loss[loss=3.422, ArTop10Accuracy=0.6529, over 4763.48 frames. ], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,957 INFO [trainer.py:765] (0/8) Epoch 2, batch 200, train_loss[loss=3.368, ArTop10Accuracy=0.6675, over 13398.00 frames. ], tot_loss[loss=3.39, ArTop10Accuracy=0.6587, over 7742.95 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,534 INFO [trainer.py:765] (0/8) Epoch 2, batch 300, train_loss[loss=3.316, ArTop10Accuracy=0.6735, over 14268.00 frames. ], tot_loss[loss=3.369, ArTop10Accuracy=0.6629, over 9373.85 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,638 INFO [trainer.py:765] (0/8) Epoch 2, batch 400, train_loss[loss=3.324, ArTop10Accuracy=0.6711, over 10158.00 frames. ], tot_loss[loss=3.357, ArTop10Accuracy=0.6656, over 10297.01 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 08:31:22,899 INFO [trainer.py:765] (0/8) Epoch 2, batch 500, train_loss[loss=3.281, ArTop10Accuracy=0.6779, over 12660.00 frames. ], tot_loss[loss=3.346, ArTop10Accuracy=0.6677, over 10864.71 frames. ], batch size: 23, lr: 2.87e-02 +2024-08-06 08:32:45,689 INFO [trainer.py:765] (0/8) Epoch 2, batch 600, train_loss[loss=3.325, ArTop10Accuracy=0.6746, over 11358.00 frames. ], tot_loss[loss=3.334, ArTop10Accuracy=0.6699, over 11387.56 frames. 
], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,583 INFO [trainer.py:765] (0/8) Epoch 2, batch 700, train_loss[loss=3.228, ArTop10Accuracy=0.6954, over 10290.00 frames. ], tot_loss[loss=3.325, ArTop10Accuracy=0.6716, over 11532.16 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 08:34:31,174 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:34:40,887 INFO [trainer.py:811] (0/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,888 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28892MB +2024-08-06 08:34:41,700 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,879 INFO [trainer.py:765] (0/8) Epoch 2, batch 800, train_loss[loss=3.349, ArTop10Accuracy=0.6662, over 10188.00 frames. ], tot_loss[loss=3.321, ArTop10Accuracy=0.6725, over 11635.67 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 08:36:56,371 INFO [trainer.py:765] (0/8) Epoch 2, batch 900, train_loss[loss=3.325, ArTop10Accuracy=0.6663, over 12789.00 frames. ], tot_loss[loss=3.308, ArTop10Accuracy=0.6752, over 11673.27 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,512 INFO [trainer.py:765] (0/8) Epoch 2, batch 1000, train_loss[loss=3.315, ArTop10Accuracy=0.6742, over 12846.00 frames. ], tot_loss[loss=3.298, ArTop10Accuracy=0.677, over 11874.71 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,060 INFO [trainer.py:765] (0/8) Epoch 2, batch 1100, train_loss[loss=3.25, ArTop10Accuracy=0.6829, over 13677.00 frames. ], tot_loss[loss=3.29, ArTop10Accuracy=0.6784, over 11958.14 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,220 INFO [trainer.py:765] (0/8) Epoch 2, batch 1200, train_loss[loss=3.32, ArTop10Accuracy=0.6759, over 12444.00 frames. ], tot_loss[loss=3.281, ArTop10Accuracy=0.6801, over 11872.51 frames. ], batch size: 103, lr: 2.80e-02 +2024-08-06 08:41:38,460 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 08:41:38,463 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-2.pt +2024-08-06 08:43:36,651 INFO [trainer.py:765] (0/8) Epoch 3, batch 100, train_loss[loss=3.221, ArTop10Accuracy=0.6956, over 14391.00 frames. ], tot_loss[loss=3.251, ArTop10Accuracy=0.6852, over 4763.04 frames. ], batch size: 63, lr: 2.67e-02 +2024-08-06 08:45:10,502 INFO [trainer.py:765] (0/8) Epoch 3, batch 200, train_loss[loss=3.149, ArTop10Accuracy=0.7102, over 13716.00 frames. ], tot_loss[loss=3.222, ArTop10Accuracy=0.6905, over 7746.96 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,258 INFO [trainer.py:765] (0/8) Epoch 3, batch 300, train_loss[loss=3.237, ArTop10Accuracy=0.6852, over 14136.00 frames. ], tot_loss[loss=3.205, ArTop10Accuracy=0.6942, over 9365.48 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,219 INFO [trainer.py:765] (0/8) Epoch 3, batch 400, train_loss[loss=3.123, ArTop10Accuracy=0.7122, over 10929.00 frames. ], tot_loss[loss=3.19, ArTop10Accuracy=0.6973, over 10272.52 frames. ], batch size: 15, lr: 2.63e-02 +2024-08-06 08:48:40,881 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,542 INFO [trainer.py:765] (0/8) Epoch 3, batch 500, train_loss[loss=3.102, ArTop10Accuracy=0.715, over 12600.00 frames. ], tot_loss[loss=3.169, ArTop10Accuracy=0.7016, over 10831.61 frames. 
], batch size: 23, lr: 2.62e-02 +2024-08-06 08:51:00,477 INFO [trainer.py:765] (0/8) Epoch 3, batch 600, train_loss[loss=3.068, ArTop10Accuracy=0.7223, over 11331.00 frames. ], tot_loss[loss=3.154, ArTop10Accuracy=0.7042, over 11373.76 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,618 INFO [trainer.py:765] (0/8) Epoch 3, batch 700, train_loss[loss=3.132, ArTop10Accuracy=0.7058, over 9480.00 frames. ], tot_loss[loss=3.145, ArTop10Accuracy=0.7061, over 11509.58 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 08:53:57,388 INFO [trainer.py:765] (0/8) Epoch 3, batch 800, train_loss[loss=3.117, ArTop10Accuracy=0.7134, over 9261.00 frames. ], tot_loss[loss=3.138, ArTop10Accuracy=0.7073, over 11647.27 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 08:55:15,118 INFO [trainer.py:765] (0/8) Epoch 3, batch 900, train_loss[loss=3.036, ArTop10Accuracy=0.7285, over 12813.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.7107, over 11687.90 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,558 INFO [trainer.py:765] (0/8) Epoch 3, batch 1000, train_loss[loss=3.046, ArTop10Accuracy=0.725, over 13272.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.7124, over 11871.56 frames. ], batch size: 28, lr: 2.56e-02 +2024-08-06 08:57:46,506 INFO [trainer.py:765] (0/8) Epoch 3, batch 1100, train_loss[loss=2.998, ArTop10Accuracy=0.7314, over 13731.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.7135, over 11943.01 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,400 INFO [trainer.py:765] (0/8) Epoch 3, batch 1200, train_loss[loss=3.119, ArTop10Accuracy=0.7097, over 12366.00 frames. ], tot_loss[loss=3.097, ArTop10Accuracy=0.7148, over 11874.89 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:02,053 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:00:02,056 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-3.pt +2024-08-06 09:01:50,742 INFO [trainer.py:765] (0/8) Epoch 4, batch 100, train_loss[loss=3.096, ArTop10Accuracy=0.7157, over 14289.00 frames. ], tot_loss[loss=3.065, ArTop10Accuracy=0.72, over 4767.49 frames. ], batch size: 63, lr: 2.38e-02 +2024-08-06 09:02:52,858 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. +2024-08-06 09:03:02,384 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29208MB +2024-08-06 09:03:03,364 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,274 INFO [trainer.py:765] (0/8) Epoch 4, batch 200, train_loss[loss=3.021, ArTop10Accuracy=0.7286, over 13689.00 frames. ], tot_loss[loss=3.042, ArTop10Accuracy=0.7243, over 7737.91 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,733 INFO [trainer.py:765] (0/8) Epoch 4, batch 300, train_loss[loss=3.066, ArTop10Accuracy=0.7201, over 14166.00 frames. ], tot_loss[loss=3.037, ArTop10Accuracy=0.7256, over 9341.46 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 09:06:28,148 INFO [trainer.py:765] (0/8) Epoch 4, batch 400, train_loss[loss=2.954, ArTop10Accuracy=0.7482, over 11040.00 frames. ], tot_loss[loss=3.034, ArTop10Accuracy=0.7265, over 10259.58 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 09:08:01,927 INFO [trainer.py:765] (0/8) Epoch 4, batch 500, train_loss[loss=2.944, ArTop10Accuracy=0.742, over 12204.00 frames. 
], tot_loss[loss=3.023, ArTop10Accuracy=0.7286, over 10800.93 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,540 INFO [trainer.py:765] (0/8) Epoch 4, batch 600, train_loss[loss=2.976, ArTop10Accuracy=0.7362, over 12120.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7291, over 11356.71 frames. ], batch size: 19, lr: 2.32e-02 +2024-08-06 09:10:59,865 INFO [trainer.py:765] (0/8) Epoch 4, batch 700, train_loss[loss=2.918, ArTop10Accuracy=0.7454, over 10062.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7289, over 11509.51 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,513 INFO [trainer.py:765] (0/8) Epoch 4, batch 800, train_loss[loss=2.966, ArTop10Accuracy=0.7337, over 9609.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7287, over 11636.06 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 09:13:33,212 INFO [trainer.py:765] (0/8) Epoch 4, batch 900, train_loss[loss=3.013, ArTop10Accuracy=0.7299, over 12831.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7305, over 11687.30 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,520 INFO [trainer.py:765] (0/8) Epoch 4, batch 1000, train_loss[loss=3.014, ArTop10Accuracy=0.7309, over 13050.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7305, over 11894.91 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,982 INFO [trainer.py:765] (0/8) Epoch 4, batch 1100, train_loss[loss=3.08, ArTop10Accuracy=0.7138, over 13473.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7305, over 11959.77 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,291 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,344 INFO [trainer.py:765] (0/8) Epoch 4, batch 1200, train_loss[loss=3.102, ArTop10Accuracy=0.7123, over 12105.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7306, over 11874.25 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,203 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:18:17,206 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-4.pt +2024-08-06 09:20:17,173 INFO [trainer.py:765] (0/8) Epoch 5, batch 100, train_loss[loss=2.964, ArTop10Accuracy=0.7386, over 14166.00 frames. ], tot_loss[loss=2.993, ArTop10Accuracy=0.7338, over 4763.83 frames. ], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,291 INFO [trainer.py:765] (0/8) Epoch 5, batch 200, train_loss[loss=2.948, ArTop10Accuracy=0.742, over 13764.00 frames. ], tot_loss[loss=2.985, ArTop10Accuracy=0.7353, over 7747.26 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,241 INFO [trainer.py:765] (0/8) Epoch 5, batch 300, train_loss[loss=2.968, ArTop10Accuracy=0.7409, over 14202.00 frames. ], tot_loss[loss=2.975, ArTop10Accuracy=0.7374, over 9374.58 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,537 INFO [trainer.py:765] (0/8) Epoch 5, batch 400, train_loss[loss=2.865, ArTop10Accuracy=0.759, over 10353.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7383, over 10278.00 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,418 INFO [trainer.py:765] (0/8) Epoch 5, batch 500, train_loss[loss=2.904, ArTop10Accuracy=0.7532, over 12828.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7399, over 10863.61 frames. ], batch size: 23, lr: 2.06e-02 +2024-08-06 09:27:49,537 INFO [trainer.py:765] (0/8) Epoch 5, batch 600, train_loss[loss=2.948, ArTop10Accuracy=0.7446, over 11325.00 frames. 
], tot_loss[loss=2.967, ArTop10Accuracy=0.7389, over 11367.62 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,670 INFO [trainer.py:765] (0/8) Epoch 5, batch 700, train_loss[loss=2.924, ArTop10Accuracy=0.7441, over 10278.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7386, over 11525.90 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 09:30:44,693 INFO [trainer.py:765] (0/8) Epoch 5, batch 800, train_loss[loss=2.783, ArTop10Accuracy=0.78, over 10116.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7385, over 11608.55 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 09:31:51,239 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:32:00,760 INFO [trainer.py:811] (0/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,761 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29301MB +2024-08-06 09:32:01,710 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,554 INFO [trainer.py:765] (0/8) Epoch 5, batch 900, train_loss[loss=2.973, ArTop10Accuracy=0.7398, over 12834.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.74, over 11677.29 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,322 INFO [trainer.py:765] (0/8) Epoch 5, batch 1000, train_loss[loss=2.965, ArTop10Accuracy=0.7397, over 12891.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7404, over 11884.96 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,300 INFO [trainer.py:765] (0/8) Epoch 5, batch 1100, train_loss[loss=2.896, ArTop10Accuracy=0.7578, over 13686.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7399, over 11934.83 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,332 INFO [trainer.py:765] (0/8) Epoch 5, batch 1200, train_loss[loss=3.075, ArTop10Accuracy=0.7164, over 12558.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7403, over 11853.83 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:54,969 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:36:54,973 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-5.pt +2024-08-06 09:38:52,662 INFO [trainer.py:765] (0/8) Epoch 6, batch 100, train_loss[loss=2.91, ArTop10Accuracy=0.7488, over 14733.00 frames. ], tot_loss[loss=2.956, ArTop10Accuracy=0.7406, over 4774.76 frames. ], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,834 INFO [trainer.py:765] (0/8) Epoch 6, batch 200, train_loss[loss=2.939, ArTop10Accuracy=0.7433, over 13842.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7444, over 7770.99 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,967 INFO [trainer.py:765] (0/8) Epoch 6, batch 300, train_loss[loss=2.942, ArTop10Accuracy=0.7465, over 14082.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7464, over 9388.00 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 09:43:17,829 INFO [trainer.py:765] (0/8) Epoch 6, batch 400, train_loss[loss=2.955, ArTop10Accuracy=0.7367, over 10458.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7473, over 10284.90 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,130 INFO [trainer.py:765] (0/8) Epoch 6, batch 500, train_loss[loss=2.873, ArTop10Accuracy=0.7609, over 12111.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7479, over 10842.93 frames. 
], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,873 INFO [trainer.py:765] (0/8) Epoch 6, batch 600, train_loss[loss=2.85, ArTop10Accuracy=0.7588, over 11493.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7482, over 11346.39 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,217 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,871 INFO [trainer.py:765] (0/8) Epoch 6, batch 700, train_loss[loss=2.809, ArTop10Accuracy=0.7689, over 9942.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7472, over 11515.67 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,955 INFO [trainer.py:765] (0/8) Epoch 6, batch 800, train_loss[loss=2.98, ArTop10Accuracy=0.7345, over 10128.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7461, over 11608.70 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,135 INFO [trainer.py:765] (0/8) Epoch 6, batch 900, train_loss[loss=2.967, ArTop10Accuracy=0.7375, over 12921.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7477, over 11657.07 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,297 INFO [trainer.py:765] (0/8) Epoch 6, batch 1000, train_loss[loss=2.928, ArTop10Accuracy=0.7449, over 12840.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.747, over 11871.01 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,921 INFO [trainer.py:765] (0/8) Epoch 6, batch 1100, train_loss[loss=2.869, ArTop10Accuracy=0.7588, over 13548.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7462, over 11945.67 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,336 INFO [trainer.py:765] (0/8) Epoch 6, batch 1200, train_loss[loss=3.067, ArTop10Accuracy=0.7191, over 11925.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7467, over 11849.90 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,161 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:55:13,166 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-6.pt +2024-08-06 09:57:06,699 INFO [trainer.py:765] (0/8) Epoch 7, batch 100, train_loss[loss=3.021, ArTop10Accuracy=0.7334, over 14799.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7499, over 4751.91 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 09:58:39,426 INFO [trainer.py:765] (0/8) Epoch 7, batch 200, train_loss[loss=2.914, ArTop10Accuracy=0.747, over 13647.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7525, over 7761.94 frames. ], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,082 INFO [trainer.py:765] (0/8) Epoch 7, batch 300, train_loss[loss=2.962, ArTop10Accuracy=0.7351, over 14187.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7533, over 9377.67 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,508 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29301MB +2024-08-06 10:00:50,977 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,117 INFO [trainer.py:765] (0/8) Epoch 7, batch 400, train_loss[loss=2.834, ArTop10Accuracy=0.7614, over 10248.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7525, over 10301.33 frames. 
], batch size: 14, lr: 1.62e-02 +2024-08-06 10:03:21,458 INFO [trainer.py:765] (0/8) Epoch 7, batch 500, train_loss[loss=2.807, ArTop10Accuracy=0.7693, over 12213.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7533, over 10851.85 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,882 INFO [trainer.py:765] (0/8) Epoch 7, batch 600, train_loss[loss=2.822, ArTop10Accuracy=0.7734, over 11343.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7531, over 11367.18 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 10:06:25,111 INFO [trainer.py:765] (0/8) Epoch 7, batch 700, train_loss[loss=2.937, ArTop10Accuracy=0.7484, over 10293.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7521, over 11508.82 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,950 INFO [trainer.py:765] (0/8) Epoch 7, batch 800, train_loss[loss=2.873, ArTop10Accuracy=0.7592, over 10146.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7523, over 11629.16 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,824 INFO [trainer.py:765] (0/8) Epoch 7, batch 900, train_loss[loss=2.971, ArTop10Accuracy=0.7367, over 12774.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7537, over 11679.75 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,635 INFO [trainer.py:765] (0/8) Epoch 7, batch 1000, train_loss[loss=2.904, ArTop10Accuracy=0.7487, over 12768.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7523, over 11877.49 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,207 INFO [trainer.py:765] (0/8) Epoch 7, batch 1100, train_loss[loss=2.97, ArTop10Accuracy=0.7417, over 13638.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7512, over 11956.00 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,204 INFO [trainer.py:765] (0/8) Epoch 7, batch 1200, train_loss[loss=3.043, ArTop10Accuracy=0.7302, over 12201.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7514, over 11879.95 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,785 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:13:46,788 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-7.pt +2024-08-06 10:15:03,600 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,820 INFO [trainer.py:765] (0/8) Epoch 8, batch 100, train_loss[loss=2.903, ArTop10Accuracy=0.7486, over 14670.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7534, over 4768.92 frames. ], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,862 INFO [trainer.py:765] (0/8) Epoch 8, batch 200, train_loss[loss=2.86, ArTop10Accuracy=0.7606, over 13779.00 frames. ], tot_loss[loss=2.879, ArTop10Accuracy=0.7555, over 7751.17 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,898 INFO [trainer.py:765] (0/8) Epoch 8, batch 300, train_loss[loss=2.875, ArTop10Accuracy=0.7556, over 14097.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7568, over 9353.66 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,341 INFO [trainer.py:765] (0/8) Epoch 8, batch 400, train_loss[loss=2.701, ArTop10Accuracy=0.7881, over 10956.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7574, over 10265.29 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 10:21:32,411 INFO [trainer.py:765] (0/8) Epoch 8, batch 500, train_loss[loss=2.816, ArTop10Accuracy=0.7739, over 12225.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7587, over 10847.20 frames. 
], batch size: 22, lr: 1.45e-02 +2024-08-06 10:23:00,974 INFO [trainer.py:765] (0/8) Epoch 8, batch 600, train_loss[loss=2.888, ArTop10Accuracy=0.7598, over 11340.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7583, over 11377.64 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,787 INFO [trainer.py:765] (0/8) Epoch 8, batch 700, train_loss[loss=2.898, ArTop10Accuracy=0.7523, over 9450.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7574, over 11524.35 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:25:56,088 INFO [trainer.py:765] (0/8) Epoch 8, batch 800, train_loss[loss=2.921, ArTop10Accuracy=0.7523, over 10218.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7566, over 11645.69 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:27:12,246 INFO [trainer.py:765] (0/8) Epoch 8, batch 900, train_loss[loss=2.885, ArTop10Accuracy=0.7531, over 12756.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7585, over 11705.50 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:28:25,263 INFO [trainer.py:765] (0/8) Epoch 8, batch 1000, train_loss[loss=2.886, ArTop10Accuracy=0.7564, over 12960.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7571, over 11890.41 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,155 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:29:16,830 INFO [trainer.py:811] (0/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29519MB +2024-08-06 10:29:17,490 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,730 INFO [trainer.py:765] (0/8) Epoch 8, batch 1100, train_loss[loss=2.87, ArTop10Accuracy=0.7579, over 13614.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7563, over 11953.41 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,948 INFO [trainer.py:765] (0/8) Epoch 8, batch 1200, train_loss[loss=2.996, ArTop10Accuracy=0.7309, over 12624.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7558, over 11901.69 frames. ], batch size: 103, lr: 1.40e-02 +2024-08-06 10:32:05,402 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:32:05,407 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-8.pt +2024-08-06 10:34:01,256 INFO [trainer.py:765] (0/8) Epoch 9, batch 100, train_loss[loss=2.882, ArTop10Accuracy=0.7575, over 14805.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7592, over 4758.64 frames. ], batch size: 63, lr: 1.32e-02 +2024-08-06 10:35:31,772 INFO [trainer.py:765] (0/8) Epoch 9, batch 200, train_loss[loss=2.787, ArTop10Accuracy=0.7743, over 13638.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7607, over 7747.34 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,926 INFO [trainer.py:765] (0/8) Epoch 9, batch 300, train_loss[loss=2.883, ArTop10Accuracy=0.7545, over 14226.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7606, over 9353.90 frames. ], batch size: 45, lr: 1.31e-02 +2024-08-06 10:38:32,698 INFO [trainer.py:765] (0/8) Epoch 9, batch 400, train_loss[loss=2.729, ArTop10Accuracy=0.7845, over 10419.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.762, over 10282.65 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,256 INFO [trainer.py:765] (0/8) Epoch 9, batch 500, train_loss[loss=2.871, ArTop10Accuracy=0.7572, over 11979.00 frames. 
], tot_loss[loss=2.838, ArTop10Accuracy=0.7632, over 10858.06 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,690 INFO [trainer.py:765] (0/8) Epoch 9, batch 600, train_loss[loss=2.819, ArTop10Accuracy=0.7655, over 11466.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7628, over 11377.95 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,440 INFO [trainer.py:765] (0/8) Epoch 9, batch 700, train_loss[loss=2.632, ArTop10Accuracy=0.7985, over 10293.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7629, over 11523.81 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:44:02,952 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,669 INFO [trainer.py:765] (0/8) Epoch 9, batch 800, train_loss[loss=2.764, ArTop10Accuracy=0.7822, over 9246.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.762, over 11613.97 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:45:35,718 INFO [trainer.py:765] (0/8) Epoch 9, batch 900, train_loss[loss=2.91, ArTop10Accuracy=0.7467, over 13092.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7629, over 11694.74 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:46:51,271 INFO [trainer.py:765] (0/8) Epoch 9, batch 1000, train_loss[loss=2.834, ArTop10Accuracy=0.7649, over 12852.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.762, over 11888.03 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,247 INFO [trainer.py:765] (0/8) Epoch 9, batch 1100, train_loss[loss=2.846, ArTop10Accuracy=0.7591, over 13386.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7603, over 11952.87 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,054 INFO [trainer.py:765] (0/8) Epoch 9, batch 1200, train_loss[loss=2.929, ArTop10Accuracy=0.7448, over 12195.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7599, over 11873.16 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:22,708 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:50:22,712 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-9.pt +2024-08-06 10:52:12,325 INFO [trainer.py:765] (0/8) Epoch 10, batch 100, train_loss[loss=2.889, ArTop10Accuracy=0.7574, over 14289.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7603, over 4750.13 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,584 INFO [trainer.py:765] (0/8) Epoch 10, batch 200, train_loss[loss=2.83, ArTop10Accuracy=0.7625, over 13602.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7638, over 7744.10 frames. ], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,089 INFO [trainer.py:765] (0/8) Epoch 10, batch 300, train_loss[loss=2.874, ArTop10Accuracy=0.7542, over 13893.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7651, over 9372.24 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,177 INFO [trainer.py:765] (0/8) Epoch 10, batch 400, train_loss[loss=2.776, ArTop10Accuracy=0.7748, over 10425.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7658, over 10286.81 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 10:58:04,937 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:58:14,555 INFO [trainer.py:811] (0/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. 
+2024-08-06 10:58:14,556 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29519MB +2024-08-06 10:58:15,574 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,579 INFO [trainer.py:765] (0/8) Epoch 10, batch 500, train_loss[loss=2.791, ArTop10Accuracy=0.7754, over 12168.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.766, over 10844.50 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,817 INFO [trainer.py:765] (0/8) Epoch 10, batch 600, train_loss[loss=2.772, ArTop10Accuracy=0.7802, over 11367.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7664, over 11362.90 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,109 INFO [trainer.py:765] (0/8) Epoch 10, batch 700, train_loss[loss=2.798, ArTop10Accuracy=0.769, over 10206.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7656, over 11497.81 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 11:02:36,918 INFO [trainer.py:765] (0/8) Epoch 10, batch 800, train_loss[loss=2.694, ArTop10Accuracy=0.7909, over 9990.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.764, over 11622.09 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 11:03:51,212 INFO [trainer.py:765] (0/8) Epoch 10, batch 900, train_loss[loss=2.837, ArTop10Accuracy=0.7643, over 13029.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.765, over 11669.36 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,352 INFO [trainer.py:765] (0/8) Epoch 10, batch 1000, train_loss[loss=2.861, ArTop10Accuracy=0.7555, over 13305.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7652, over 11865.57 frames. ], batch size: 28, lr: 1.17e-02 +2024-08-06 11:06:21,724 INFO [trainer.py:765] (0/8) Epoch 10, batch 1100, train_loss[loss=2.84, ArTop10Accuracy=0.7659, over 13539.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7642, over 11938.55 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,772 INFO [trainer.py:765] (0/8) Epoch 10, batch 1200, train_loss[loss=2.951, ArTop10Accuracy=0.7414, over 12210.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7635, over 11860.87 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,817 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:08:33,820 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-10.pt +2024-08-06 11:10:29,953 INFO [trainer.py:765] (0/8) Epoch 11, batch 100, train_loss[loss=2.853, ArTop10Accuracy=0.7606, over 14718.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.766, over 4759.47 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,673 INFO [trainer.py:765] (0/8) Epoch 11, batch 200, train_loss[loss=2.845, ArTop10Accuracy=0.7641, over 14085.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7672, over 7737.29 frames. ], batch size: 35, lr: 1.10e-02 +2024-08-06 11:12:22,823 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,549 INFO [trainer.py:765] (0/8) Epoch 11, batch 300, train_loss[loss=2.914, ArTop10Accuracy=0.749, over 14385.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7683, over 9360.87 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,268 INFO [trainer.py:765] (0/8) Epoch 11, batch 400, train_loss[loss=2.742, ArTop10Accuracy=0.7831, over 10377.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.769, over 10296.01 frames. 
], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,637 INFO [trainer.py:765] (0/8) Epoch 11, batch 500, train_loss[loss=2.801, ArTop10Accuracy=0.7716, over 12243.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7699, over 10869.51 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,516 INFO [trainer.py:765] (0/8) Epoch 11, batch 600, train_loss[loss=2.718, ArTop10Accuracy=0.7889, over 11457.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7698, over 11377.43 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,512 INFO [trainer.py:765] (0/8) Epoch 11, batch 700, train_loss[loss=2.702, ArTop10Accuracy=0.7909, over 10167.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7684, over 11520.97 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 11:20:55,482 INFO [trainer.py:765] (0/8) Epoch 11, batch 800, train_loss[loss=2.676, ArTop10Accuracy=0.795, over 10086.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7681, over 11629.35 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,704 INFO [trainer.py:765] (0/8) Epoch 11, batch 900, train_loss[loss=2.802, ArTop10Accuracy=0.771, over 12939.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7685, over 11682.50 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,797 INFO [trainer.py:765] (0/8) Epoch 11, batch 1000, train_loss[loss=2.76, ArTop10Accuracy=0.7754, over 12987.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7675, over 11877.90 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,901 INFO [trainer.py:765] (0/8) Epoch 11, batch 1100, train_loss[loss=2.779, ArTop10Accuracy=0.7775, over 13578.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7666, over 11962.65 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,733 INFO [trainer.py:765] (0/8) Epoch 11, batch 1200, train_loss[loss=2.934, ArTop10Accuracy=0.7458, over 12288.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7664, over 11874.75 frames. ], batch size: 103, lr: 1.06e-02 +2024-08-06 11:26:15,845 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29519MB +2024-08-06 11:26:26,185 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,747 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:27:09,754 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-11.pt +2024-08-06 11:29:03,450 INFO [trainer.py:765] (0/8) Epoch 12, batch 100, train_loss[loss=2.881, ArTop10Accuracy=0.7544, over 14667.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7704, over 4747.48 frames. ], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,673 INFO [trainer.py:765] (0/8) Epoch 12, batch 200, train_loss[loss=2.785, ArTop10Accuracy=0.7731, over 13518.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7705, over 7756.10 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,654 INFO [trainer.py:765] (0/8) Epoch 12, batch 300, train_loss[loss=2.832, ArTop10Accuracy=0.7641, over 14247.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7719, over 9392.76 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 11:33:30,737 INFO [trainer.py:765] (0/8) Epoch 12, batch 400, train_loss[loss=2.739, ArTop10Accuracy=0.7777, over 10179.00 frames. 
], tot_loss[loss=2.794, ArTop10Accuracy=0.7714, over 10292.52 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,734 INFO [trainer.py:765] (0/8) Epoch 12, batch 500, train_loss[loss=2.801, ArTop10Accuracy=0.7709, over 12150.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7728, over 10850.93 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,361 INFO [trainer.py:765] (0/8) Epoch 12, batch 600, train_loss[loss=2.741, ArTop10Accuracy=0.7815, over 11487.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7724, over 11362.82 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,343 INFO [trainer.py:765] (0/8) Epoch 12, batch 700, train_loss[loss=2.741, ArTop10Accuracy=0.7811, over 10062.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7713, over 11517.08 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,610 INFO [trainer.py:765] (0/8) Epoch 12, batch 800, train_loss[loss=2.759, ArTop10Accuracy=0.7732, over 10080.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7705, over 11636.73 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,888 INFO [trainer.py:765] (0/8) Epoch 12, batch 900, train_loss[loss=2.823, ArTop10Accuracy=0.7693, over 12876.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7724, over 11678.76 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:13,995 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,188 INFO [trainer.py:765] (0/8) Epoch 12, batch 1000, train_loss[loss=2.811, ArTop10Accuracy=0.7647, over 12993.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7713, over 11886.62 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,319 INFO [trainer.py:765] (0/8) Epoch 12, batch 1100, train_loss[loss=2.781, ArTop10Accuracy=0.7739, over 13596.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7707, over 11956.72 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,155 INFO [trainer.py:765] (0/8) Epoch 12, batch 1200, train_loss[loss=2.9, ArTop10Accuracy=0.7534, over 12429.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7699, over 11861.85 frames. ], batch size: 103, lr: 9.79e-03 +2024-08-06 11:45:26,924 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:45:26,927 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-12.pt +2024-08-06 11:47:26,603 INFO [trainer.py:765] (0/8) Epoch 13, batch 100, train_loss[loss=2.873, ArTop10Accuracy=0.7586, over 14676.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.7726, over 4752.95 frames. ], batch size: 63, lr: 9.37e-03 +2024-08-06 11:48:54,779 INFO [trainer.py:765] (0/8) Epoch 13, batch 200, train_loss[loss=2.687, ArTop10Accuracy=0.7937, over 13491.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7742, over 7726.95 frames. ], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,515 INFO [trainer.py:765] (0/8) Epoch 13, batch 300, train_loss[loss=2.866, ArTop10Accuracy=0.7566, over 14184.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7748, over 9347.37 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,764 INFO [trainer.py:765] (0/8) Epoch 13, batch 400, train_loss[loss=2.725, ArTop10Accuracy=0.7881, over 10398.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7757, over 10274.62 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,408 INFO [trainer.py:765] (0/8) Epoch 13, batch 500, train_loss[loss=2.662, ArTop10Accuracy=0.7992, over 12588.00 frames. 
], tot_loss[loss=2.77, ArTop10Accuracy=0.7759, over 10848.23 frames. ], batch size: 23, lr: 9.26e-03 +2024-08-06 11:54:52,223 INFO [trainer.py:765] (0/8) Epoch 13, batch 600, train_loss[loss=2.762, ArTop10Accuracy=0.7758, over 11412.00 frames. ], tot_loss[loss=2.773, ArTop10Accuracy=0.7756, over 11379.70 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,082 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:55:56,835 INFO [trainer.py:811] (0/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29519MB +2024-08-06 11:55:57,712 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,465 INFO [trainer.py:765] (0/8) Epoch 13, batch 700, train_loss[loss=2.707, ArTop10Accuracy=0.7891, over 10317.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7749, over 11514.39 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 11:57:46,684 INFO [trainer.py:765] (0/8) Epoch 13, batch 800, train_loss[loss=2.67, ArTop10Accuracy=0.7936, over 9534.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7743, over 11614.14 frames. ], batch size: 11, lr: 9.18e-03 +2024-08-06 11:59:03,289 INFO [trainer.py:765] (0/8) Epoch 13, batch 900, train_loss[loss=2.81, ArTop10Accuracy=0.7719, over 13218.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.775, over 11658.17 frames. ], batch size: 28, lr: 9.15e-03 +2024-08-06 12:00:19,174 INFO [trainer.py:765] (0/8) Epoch 13, batch 1000, train_loss[loss=2.737, ArTop10Accuracy=0.7798, over 13002.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7743, over 11880.17 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,883 INFO [trainer.py:765] (0/8) Epoch 13, batch 1100, train_loss[loss=2.79, ArTop10Accuracy=0.7723, over 13695.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7728, over 11953.89 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,662 INFO [trainer.py:765] (0/8) Epoch 13, batch 1200, train_loss[loss=2.97, ArTop10Accuracy=0.7374, over 12612.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7723, over 11864.76 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:47,909 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:03:47,912 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-13.pt +2024-08-06 12:05:45,336 INFO [trainer.py:765] (0/8) Epoch 14, batch 100, train_loss[loss=2.841, ArTop10Accuracy=0.7652, over 14853.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7759, over 4768.63 frames. ], batch size: 64, lr: 8.71e-03 +2024-08-06 12:07:16,605 INFO [trainer.py:765] (0/8) Epoch 14, batch 200, train_loss[loss=2.748, ArTop10Accuracy=0.7779, over 13905.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7762, over 7750.37 frames. ], batch size: 35, lr: 8.69e-03 +2024-08-06 12:08:44,312 INFO [trainer.py:765] (0/8) Epoch 14, batch 300, train_loss[loss=2.778, ArTop10Accuracy=0.772, over 14355.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7769, over 9379.42 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,132 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,226 INFO [trainer.py:765] (0/8) Epoch 14, batch 400, train_loss[loss=2.741, ArTop10Accuracy=0.7836, over 10218.00 frames. 
], tot_loss[loss=2.766, ArTop10Accuracy=0.7765, over 10280.49 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 12:11:36,150 INFO [trainer.py:765] (0/8) Epoch 14, batch 500, train_loss[loss=2.676, ArTop10Accuracy=0.7933, over 12192.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 10852.25 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,993 INFO [trainer.py:765] (0/8) Epoch 14, batch 600, train_loss[loss=2.752, ArTop10Accuracy=0.7771, over 11457.00 frames. ], tot_loss[loss=2.763, ArTop10Accuracy=0.7771, over 11369.19 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,553 INFO [trainer.py:765] (0/8) Epoch 14, batch 700, train_loss[loss=2.705, ArTop10Accuracy=0.7961, over 10176.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7761, over 11536.24 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 12:15:58,070 INFO [trainer.py:765] (0/8) Epoch 14, batch 800, train_loss[loss=2.689, ArTop10Accuracy=0.7909, over 10209.00 frames. ], tot_loss[loss=2.772, ArTop10Accuracy=0.7753, over 11651.71 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 12:17:12,865 INFO [trainer.py:765] (0/8) Epoch 14, batch 900, train_loss[loss=2.732, ArTop10Accuracy=0.7826, over 12996.00 frames. ], tot_loss[loss=2.768, ArTop10Accuracy=0.7763, over 11690.20 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,613 INFO [trainer.py:765] (0/8) Epoch 14, batch 1000, train_loss[loss=2.803, ArTop10Accuracy=0.7661, over 12777.00 frames. ], tot_loss[loss=2.775, ArTop10Accuracy=0.7751, over 11882.08 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,376 INFO [trainer.py:765] (0/8) Epoch 14, batch 1100, train_loss[loss=2.777, ArTop10Accuracy=0.7773, over 13431.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7741, over 11942.94 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,278 INFO [trainer.py:765] (0/8) Epoch 14, batch 1200, train_loss[loss=2.906, ArTop10Accuracy=0.75, over 12942.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7745, over 11861.52 frames. ], batch size: 102, lr: 8.46e-03 +2024-08-06 12:21:58,346 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:21:58,348 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-14.pt +2024-08-06 12:23:51,960 INFO [trainer.py:765] (0/8) Epoch 15, batch 100, train_loss[loss=2.836, ArTop10Accuracy=0.7586, over 14583.00 frames. ], tot_loss[loss=2.768, ArTop10Accuracy=0.7759, over 4768.34 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,597 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (0/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. +2024-08-06 12:24:10,291 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29519MB +2024-08-06 12:24:11,094 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,990 INFO [trainer.py:765] (0/8) Epoch 15, batch 200, train_loss[loss=2.826, ArTop10Accuracy=0.7627, over 13767.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7783, over 7773.81 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,695 INFO [trainer.py:765] (0/8) Epoch 15, batch 300, train_loss[loss=2.819, ArTop10Accuracy=0.7639, over 13935.00 frames. ], tot_loss[loss=2.753, ArTop10Accuracy=0.7791, over 9378.32 frames. 
], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,536 INFO [trainer.py:765] (0/8) Epoch 15, batch 400, train_loss[loss=2.684, ArTop10Accuracy=0.7938, over 10281.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7796, over 10275.42 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,033 INFO [trainer.py:765] (0/8) Epoch 15, batch 500, train_loss[loss=2.691, ArTop10Accuracy=0.7942, over 12264.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7804, over 10831.07 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,293 INFO [trainer.py:765] (0/8) Epoch 15, batch 600, train_loss[loss=2.735, ArTop10Accuracy=0.7826, over 11409.00 frames. ], tot_loss[loss=2.753, ArTop10Accuracy=0.7791, over 11367.63 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,176 INFO [trainer.py:765] (0/8) Epoch 15, batch 700, train_loss[loss=2.686, ArTop10Accuracy=0.7965, over 9513.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7784, over 11512.36 frames. ], batch size: 11, lr: 8.01e-03 +2024-08-06 12:34:18,254 INFO [trainer.py:765] (0/8) Epoch 15, batch 800, train_loss[loss=2.696, ArTop10Accuracy=0.7923, over 9231.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7779, over 11623.54 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 12:35:34,727 INFO [trainer.py:765] (0/8) Epoch 15, batch 900, train_loss[loss=2.797, ArTop10Accuracy=0.7713, over 12987.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7785, over 11662.82 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,540 INFO [trainer.py:765] (0/8) Epoch 15, batch 1000, train_loss[loss=2.785, ArTop10Accuracy=0.7808, over 12819.00 frames. ], tot_loss[loss=2.76, ArTop10Accuracy=0.7779, over 11884.84 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,181 INFO [trainer.py:765] (0/8) Epoch 15, batch 1100, train_loss[loss=2.821, ArTop10Accuracy=0.7663, over 13737.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7766, over 11951.41 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,841 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,789 INFO [trainer.py:765] (0/8) Epoch 15, batch 1200, train_loss[loss=2.888, ArTop10Accuracy=0.7535, over 12417.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.776, over 11865.99 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,830 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:40:18,833 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-15.pt +2024-08-06 12:42:17,620 INFO [trainer.py:765] (0/8) Epoch 16, batch 100, train_loss[loss=2.796, ArTop10Accuracy=0.7693, over 14427.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7806, over 4772.64 frames. ], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,565 INFO [trainer.py:765] (0/8) Epoch 16, batch 200, train_loss[loss=2.744, ArTop10Accuracy=0.7763, over 13896.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7806, over 7760.92 frames. ], batch size: 35, lr: 7.61e-03 +2024-08-06 12:45:18,501 INFO [trainer.py:765] (0/8) Epoch 16, batch 300, train_loss[loss=2.8, ArTop10Accuracy=0.7674, over 14070.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7808, over 9372.95 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,208 INFO [trainer.py:765] (0/8) Epoch 16, batch 400, train_loss[loss=2.716, ArTop10Accuracy=0.7876, over 10245.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7806, over 10296.61 frames. 
], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,312 INFO [trainer.py:765] (0/8) Epoch 16, batch 500, train_loss[loss=2.686, ArTop10Accuracy=0.7917, over 12087.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7818, over 10853.59 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,641 INFO [trainer.py:765] (0/8) Epoch 16, batch 600, train_loss[loss=2.69, ArTop10Accuracy=0.792, over 11271.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7811, over 11366.38 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,680 INFO [trainer.py:765] (0/8) Epoch 16, batch 700, train_loss[loss=2.612, ArTop10Accuracy=0.8047, over 9414.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7808, over 11501.99 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 12:52:43,500 INFO [trainer.py:765] (0/8) Epoch 16, batch 800, train_loss[loss=2.757, ArTop10Accuracy=0.7792, over 9558.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7797, over 11628.70 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,014 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-20000.pt +2024-08-06 12:53:08,969 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:53:15,494 INFO [trainer.py:811] (0/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,495 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29524MB +2024-08-06 12:53:16,187 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,480 INFO [trainer.py:765] (0/8) Epoch 16, batch 900, train_loss[loss=2.719, ArTop10Accuracy=0.7879, over 12855.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7799, over 11666.65 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,791 INFO [trainer.py:765] (0/8) Epoch 16, batch 1000, train_loss[loss=2.721, ArTop10Accuracy=0.7857, over 12909.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.78, over 11872.94 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,165 INFO [trainer.py:765] (0/8) Epoch 16, batch 1100, train_loss[loss=2.78, ArTop10Accuracy=0.7767, over 13479.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7784, over 11947.93 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,485 INFO [trainer.py:765] (0/8) Epoch 16, batch 1200, train_loss[loss=2.835, ArTop10Accuracy=0.7596, over 12075.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7786, over 11864.13 frames. ], batch size: 103, lr: 7.44e-03 +2024-08-06 12:58:48,289 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:58:48,292 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-16.pt +2024-08-06 13:00:47,900 INFO [trainer.py:765] (0/8) Epoch 17, batch 100, train_loss[loss=2.789, ArTop10Accuracy=0.7719, over 14058.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7811, over 4756.29 frames. ], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,302 INFO [trainer.py:765] (0/8) Epoch 17, batch 200, train_loss[loss=2.781, ArTop10Accuracy=0.7758, over 14010.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7818, over 7771.17 frames. ], batch size: 35, lr: 7.17e-03 +2024-08-06 13:03:45,518 INFO [trainer.py:765] (0/8) Epoch 17, batch 300, train_loss[loss=2.776, ArTop10Accuracy=0.7769, over 14058.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.7832, over 9383.89 frames. 
], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,760 INFO [trainer.py:765] (0/8) Epoch 17, batch 400, train_loss[loss=2.661, ArTop10Accuracy=0.8014, over 10203.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7843, over 10293.21 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,021 INFO [trainer.py:765] (0/8) Epoch 17, batch 500, train_loss[loss=2.668, ArTop10Accuracy=0.7993, over 11934.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7846, over 10850.62 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,880 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,689 INFO [trainer.py:765] (0/8) Epoch 17, batch 600, train_loss[loss=2.689, ArTop10Accuracy=0.7944, over 11556.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7845, over 11380.73 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,835 INFO [trainer.py:765] (0/8) Epoch 17, batch 700, train_loss[loss=2.51, ArTop10Accuracy=0.8276, over 10056.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7833, over 11512.80 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,481 INFO [trainer.py:765] (0/8) Epoch 17, batch 800, train_loss[loss=2.601, ArTop10Accuracy=0.8115, over 10008.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7831, over 11634.34 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 13:12:35,670 INFO [trainer.py:765] (0/8) Epoch 17, batch 900, train_loss[loss=2.754, ArTop10Accuracy=0.7803, over 13182.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7836, over 11691.70 frames. ], batch size: 28, lr: 7.06e-03 +2024-08-06 13:13:53,063 INFO [trainer.py:765] (0/8) Epoch 17, batch 1000, train_loss[loss=2.715, ArTop10Accuracy=0.79, over 13344.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7822, over 11909.14 frames. ], batch size: 28, lr: 7.04e-03 +2024-08-06 13:15:08,483 INFO [trainer.py:765] (0/8) Epoch 17, batch 1100, train_loss[loss=2.693, ArTop10Accuracy=0.7931, over 13545.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7808, over 11965.37 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,389 INFO [trainer.py:765] (0/8) Epoch 17, batch 1200, train_loss[loss=2.875, ArTop10Accuracy=0.7577, over 12714.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7807, over 11871.85 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:21,130 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:17:21,134 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-17.pt +2024-08-06 13:19:15,995 INFO [trainer.py:765] (0/8) Epoch 18, batch 100, train_loss[loss=2.79, ArTop10Accuracy=0.7737, over 14730.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7815, over 4768.08 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,597 INFO [trainer.py:765] (0/8) Epoch 18, batch 200, train_loss[loss=2.71, ArTop10Accuracy=0.7899, over 13821.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.7832, over 7767.37 frames. ], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,105 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. 
+2024-08-06 13:22:04,752 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29524MB +2024-08-06 13:22:05,474 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,581 INFO [trainer.py:765] (0/8) Epoch 18, batch 300, train_loss[loss=2.789, ArTop10Accuracy=0.7743, over 14439.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7848, over 9396.72 frames. ], batch size: 45, lr: 6.76e-03 +2024-08-06 13:23:57,928 INFO [trainer.py:765] (0/8) Epoch 18, batch 400, train_loss[loss=2.672, ArTop10Accuracy=0.7928, over 10332.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7856, over 10300.70 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,012 INFO [trainer.py:765] (0/8) Epoch 18, batch 500, train_loss[loss=2.645, ArTop10Accuracy=0.8047, over 12105.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7864, over 10847.39 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,632 INFO [trainer.py:765] (0/8) Epoch 18, batch 600, train_loss[loss=2.638, ArTop10Accuracy=0.7969, over 12132.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7859, over 11382.46 frames. ], batch size: 19, lr: 6.71e-03 +2024-08-06 13:28:33,583 INFO [trainer.py:765] (0/8) Epoch 18, batch 700, train_loss[loss=2.602, ArTop10Accuracy=0.8091, over 10143.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7847, over 11529.41 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,986 INFO [trainer.py:765] (0/8) Epoch 18, batch 800, train_loss[loss=2.684, ArTop10Accuracy=0.7945, over 10206.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7842, over 11648.40 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,518 INFO [trainer.py:765] (0/8) Epoch 18, batch 900, train_loss[loss=2.679, ArTop10Accuracy=0.7971, over 12792.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7852, over 11695.39 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,550 INFO [trainer.py:765] (0/8) Epoch 18, batch 1000, train_loss[loss=2.67, ArTop10Accuracy=0.7925, over 12756.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7841, over 11888.08 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,496 INFO [trainer.py:765] (0/8) Epoch 18, batch 1100, train_loss[loss=2.722, ArTop10Accuracy=0.7837, over 13665.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7832, over 11958.37 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,675 INFO [trainer.py:765] (0/8) Epoch 18, batch 1200, train_loss[loss=2.85, ArTop10Accuracy=0.7681, over 12753.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7834, over 11867.53 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,064 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,972 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:35:54,974 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-18.pt +2024-08-06 13:37:48,624 INFO [trainer.py:765] (0/8) Epoch 19, batch 100, train_loss[loss=2.798, ArTop10Accuracy=0.7718, over 14607.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7848, over 4757.47 frames. ], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,256 INFO [trainer.py:765] (0/8) Epoch 19, batch 200, train_loss[loss=2.751, ArTop10Accuracy=0.7775, over 13698.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7858, over 7745.97 frames. 
], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,358 INFO [trainer.py:765] (0/8) Epoch 19, batch 300, train_loss[loss=2.768, ArTop10Accuracy=0.7791, over 14661.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7874, over 9366.48 frames. ], batch size: 45, lr: 6.40e-03 +2024-08-06 13:42:21,068 INFO [trainer.py:765] (0/8) Epoch 19, batch 400, train_loss[loss=2.615, ArTop10Accuracy=0.8009, over 10503.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7879, over 10283.24 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,955 INFO [trainer.py:765] (0/8) Epoch 19, batch 500, train_loss[loss=2.719, ArTop10Accuracy=0.7864, over 12033.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7883, over 10843.55 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,681 INFO [trainer.py:765] (0/8) Epoch 19, batch 600, train_loss[loss=2.701, ArTop10Accuracy=0.7897, over 11457.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.788, over 11364.68 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,323 INFO [trainer.py:765] (0/8) Epoch 19, batch 700, train_loss[loss=2.563, ArTop10Accuracy=0.8169, over 10290.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7874, over 11523.58 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,883 INFO [trainer.py:765] (0/8) Epoch 19, batch 800, train_loss[loss=2.58, ArTop10Accuracy=0.8112, over 10350.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7865, over 11661.89 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,256 INFO [trainer.py:765] (0/8) Epoch 19, batch 900, train_loss[loss=2.645, ArTop10Accuracy=0.8003, over 13215.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7877, over 11704.49 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,654 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:50:50,535 INFO [trainer.py:811] (0/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,535 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29524MB +2024-08-06 13:50:51,491 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,917 INFO [trainer.py:765] (0/8) Epoch 19, batch 1000, train_loss[loss=2.755, ArTop10Accuracy=0.7868, over 13050.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7866, over 11906.01 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,264 INFO [trainer.py:765] (0/8) Epoch 19, batch 1100, train_loss[loss=2.738, ArTop10Accuracy=0.7811, over 13623.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7853, over 11973.95 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,314 INFO [trainer.py:765] (0/8) Epoch 19, batch 1200, train_loss[loss=2.835, ArTop10Accuracy=0.7578, over 12549.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7847, over 11876.88 frames. ], batch size: 103, lr: 6.28e-03 +2024-08-06 13:54:21,954 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:54:21,958 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-19.pt +2024-08-06 13:56:12,902 INFO [trainer.py:765] (0/8) Epoch 20, batch 100, train_loss[loss=2.747, ArTop10Accuracy=0.7778, over 14466.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7868, over 4752.63 frames. ], batch size: 63, lr: 6.10e-03 +2024-08-06 13:57:42,495 INFO [trainer.py:765] (0/8) Epoch 20, batch 200, train_loss[loss=2.695, ArTop10Accuracy=0.7915, over 13728.00 frames. 
], tot_loss[loss=2.708, ArTop10Accuracy=0.7873, over 7731.34 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,430 INFO [trainer.py:765] (0/8) Epoch 20, batch 300, train_loss[loss=2.759, ArTop10Accuracy=0.775, over 14556.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7876, over 9388.49 frames. ], batch size: 45, lr: 6.08e-03 +2024-08-06 14:00:44,358 INFO [trainer.py:765] (0/8) Epoch 20, batch 400, train_loss[loss=2.515, ArTop10Accuracy=0.8197, over 10503.00 frames. ], tot_loss[loss=2.699, ArTop10Accuracy=0.7887, over 10287.72 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,855 INFO [trainer.py:765] (0/8) Epoch 20, batch 500, train_loss[loss=2.665, ArTop10Accuracy=0.7981, over 12078.00 frames. ], tot_loss[loss=2.693, ArTop10Accuracy=0.7902, over 10843.66 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,853 INFO [trainer.py:765] (0/8) Epoch 20, batch 600, train_loss[loss=2.729, ArTop10Accuracy=0.7816, over 11331.00 frames. ], tot_loss[loss=2.695, ArTop10Accuracy=0.7897, over 11359.53 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,864 INFO [trainer.py:765] (0/8) Epoch 20, batch 700, train_loss[loss=2.575, ArTop10Accuracy=0.8131, over 10071.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.789, over 11524.60 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 14:05:30,792 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,509 INFO [trainer.py:765] (0/8) Epoch 20, batch 800, train_loss[loss=2.737, ArTop10Accuracy=0.7809, over 10116.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7881, over 11642.44 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 14:07:50,944 INFO [trainer.py:765] (0/8) Epoch 20, batch 900, train_loss[loss=2.648, ArTop10Accuracy=0.796, over 12945.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7887, over 11672.81 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,174 INFO [trainer.py:765] (0/8) Epoch 20, batch 1000, train_loss[loss=2.709, ArTop10Accuracy=0.7859, over 13047.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7876, over 11885.74 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,209 INFO [trainer.py:765] (0/8) Epoch 20, batch 1100, train_loss[loss=2.764, ArTop10Accuracy=0.7796, over 13680.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7862, over 11935.78 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,813 INFO [trainer.py:765] (0/8) Epoch 20, batch 1200, train_loss[loss=2.803, ArTop10Accuracy=0.7718, over 11997.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7858, over 11870.84 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:37,148 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 14:12:37,151 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-20.pt +2024-08-06 14:12:43,011 INFO [trainer.py:1069] (0/8) Done! 
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-1 b/libritts-r/log/log-train-2024-08-06-08-06-14-1 new file mode 100644 index 0000000000000000000000000000000000000000..a7149f495c28335d15ca044eb7f7bfcc4f87499f --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-1 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,314 INFO [trainer.py:870] (1/8) Training started +2024-08-06 08:06:14,315 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 08:06:14,315 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,315 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 08:06:15,030 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,712 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 08:06:19,149 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 08:06:19,151 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 08:06:19,152 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 08:06:19,152 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 08:06:19,153 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,769 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 08:06:19,769 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 08:06:20,100 INFO [datamodule.py:388] (1/8) About to create dev dataloader +2024-08-06 
08:08:02,125 INFO [trainer.py:765] (1/8) Epoch 1, batch 100, train_loss[loss=4.313, ArTop10Accuracy=0.499, over 14373.00 frames. ], tot_loss[loss=5.051, ArTop10Accuracy=0.3736, over 4747.16 frames. ], batch size: 63, lr: 2.25e-02 +2024-08-06 08:09:28,831 INFO [trainer.py:765] (1/8) Epoch 1, batch 200, train_loss[loss=4.082, ArTop10Accuracy=0.5339, over 13701.00 frames. ], tot_loss[loss=4.494, ArTop10Accuracy=0.4669, over 7740.47 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,432 INFO [trainer.py:765] (1/8) Epoch 1, batch 300, train_loss[loss=3.827, ArTop10Accuracy=0.5819, over 14076.00 frames. ], tot_loss[loss=4.214, ArTop10Accuracy=0.5136, over 9378.41 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,703 INFO [trainer.py:765] (1/8) Epoch 1, batch 400, train_loss[loss=3.646, ArTop10Accuracy=0.6151, over 10353.00 frames. ], tot_loss[loss=4.028, ArTop10Accuracy=0.5453, over 10284.75 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 08:13:40,054 INFO [trainer.py:765] (1/8) Epoch 1, batch 500, train_loss[loss=3.622, ArTop10Accuracy=0.6179, over 12669.00 frames. ], tot_loss[loss=3.883, ArTop10Accuracy=0.5706, over 10856.75 frames. ], batch size: 23, lr: 2.99e-02 +2024-08-06 08:15:00,247 INFO [trainer.py:765] (1/8) Epoch 1, batch 600, train_loss[loss=3.602, ArTop10Accuracy=0.6197, over 11541.00 frames. ], tot_loss[loss=3.77, ArTop10Accuracy=0.5906, over 11363.67 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,429 INFO [trainer.py:765] (1/8) Epoch 1, batch 700, train_loss[loss=3.496, ArTop10Accuracy=0.6398, over 10332.00 frames. ], tot_loss[loss=3.689, ArTop10Accuracy=0.6051, over 11510.86 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,022 INFO [trainer.py:765] (1/8) Epoch 1, batch 800, train_loss[loss=3.526, ArTop10Accuracy=0.635, over 10014.00 frames. ], tot_loss[loss=3.625, ArTop10Accuracy=0.6167, over 11655.02 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 08:18:56,155 INFO [trainer.py:765] (1/8) Epoch 1, batch 900, train_loss[loss=3.49, ArTop10Accuracy=0.6426, over 12882.00 frames. ], tot_loss[loss=3.567, ArTop10Accuracy=0.6274, over 11695.58 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,867 INFO [trainer.py:765] (1/8) Epoch 1, batch 1000, train_loss[loss=3.434, ArTop10Accuracy=0.6517, over 13002.00 frames. ], tot_loss[loss=3.525, ArTop10Accuracy=0.635, over 11887.94 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,547 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,161 INFO [trainer.py:765] (1/8) Epoch 1, batch 1100, train_loss[loss=3.453, ArTop10Accuracy=0.6489, over 14007.00 frames. ], tot_loss[loss=3.488, ArTop10Accuracy=0.6419, over 11969.79 frames. ], batch size: 35, lr: 2.96e-02 +2024-08-06 08:22:45,417 INFO [trainer.py:765] (1/8) Epoch 1, batch 1200, train_loss[loss=3.476, ArTop10Accuracy=0.644, over 12039.00 frames. ], tot_loss[loss=3.463, ArTop10Accuracy=0.6461, over 11878.23 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,310 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:25:36,244 INFO [trainer.py:765] (1/8) Epoch 2, batch 100, train_loss[loss=3.4, ArTop10Accuracy=0.6565, over 14508.00 frames. ], tot_loss[loss=3.423, ArTop10Accuracy=0.6528, over 4764.29 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,962 INFO [trainer.py:765] (1/8) Epoch 2, batch 200, train_loss[loss=3.323, ArTop10Accuracy=0.6679, over 13725.00 frames. ], tot_loss[loss=3.385, ArTop10Accuracy=0.6599, over 7749.22 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,539 INFO [trainer.py:765] (1/8) Epoch 2, batch 300, train_loss[loss=3.403, ArTop10Accuracy=0.6592, over 14277.00 frames. ], tot_loss[loss=3.366, ArTop10Accuracy=0.6635, over 9375.13 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,644 INFO [trainer.py:765] (1/8) Epoch 2, batch 400, train_loss[loss=3.391, ArTop10Accuracy=0.6543, over 11046.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.666, over 10294.61 frames. ], batch size: 15, lr: 2.88e-02 +2024-08-06 08:31:22,906 INFO [trainer.py:765] (1/8) Epoch 2, batch 500, train_loss[loss=3.266, ArTop10Accuracy=0.6837, over 12363.00 frames. ], tot_loss[loss=3.337, ArTop10Accuracy=0.6696, over 10867.29 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 08:32:45,694 INFO [trainer.py:765] (1/8) Epoch 2, batch 600, train_loss[loss=3.294, ArTop10Accuracy=0.6813, over 11376.00 frames. ], tot_loss[loss=3.327, ArTop10Accuracy=0.6714, over 11379.06 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,587 INFO [trainer.py:765] (1/8) Epoch 2, batch 700, train_loss[loss=3.278, ArTop10Accuracy=0.6789, over 10239.00 frames. ], tot_loss[loss=3.322, ArTop10Accuracy=0.6723, over 11515.89 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 08:34:31,179 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:34:40,887 INFO [trainer.py:811] (1/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,888 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 08:34:41,706 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,883 INFO [trainer.py:765] (1/8) Epoch 2, batch 800, train_loss[loss=3.314, ArTop10Accuracy=0.6699, over 9348.00 frames. ], tot_loss[loss=3.319, ArTop10Accuracy=0.673, over 11636.96 frames. ], batch size: 11, lr: 2.84e-02 +2024-08-06 08:36:56,377 INFO [trainer.py:765] (1/8) Epoch 2, batch 900, train_loss[loss=3.373, ArTop10Accuracy=0.6616, over 12846.00 frames. ], tot_loss[loss=3.305, ArTop10Accuracy=0.6758, over 11683.18 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,518 INFO [trainer.py:765] (1/8) Epoch 2, batch 1000, train_loss[loss=3.233, ArTop10Accuracy=0.6893, over 12870.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6774, over 11873.81 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,065 INFO [trainer.py:765] (1/8) Epoch 2, batch 1100, train_loss[loss=3.28, ArTop10Accuracy=0.6837, over 13569.00 frames. ], tot_loss[loss=3.291, ArTop10Accuracy=0.6783, over 11963.75 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,225 INFO [trainer.py:765] (1/8) Epoch 2, batch 1200, train_loss[loss=3.337, ArTop10Accuracy=0.6672, over 12903.00 frames. ], tot_loss[loss=3.281, ArTop10Accuracy=0.6802, over 11861.92 frames. ], batch size: 101, lr: 2.80e-02 +2024-08-06 08:41:38,205 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:43:36,655 INFO [trainer.py:765] (1/8) Epoch 3, batch 100, train_loss[loss=3.334, ArTop10Accuracy=0.6681, over 14691.00 frames. ], tot_loss[loss=3.254, ArTop10Accuracy=0.6846, over 4778.14 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,505 INFO [trainer.py:765] (1/8) Epoch 3, batch 200, train_loss[loss=3.187, ArTop10Accuracy=0.6983, over 13692.00 frames. ], tot_loss[loss=3.223, ArTop10Accuracy=0.6906, over 7780.48 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,264 INFO [trainer.py:765] (1/8) Epoch 3, batch 300, train_loss[loss=3.197, ArTop10Accuracy=0.7005, over 14133.00 frames. ], tot_loss[loss=3.206, ArTop10Accuracy=0.6938, over 9395.78 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,223 INFO [trainer.py:765] (1/8) Epoch 3, batch 400, train_loss[loss=3.106, ArTop10Accuracy=0.7194, over 10431.00 frames. ], tot_loss[loss=3.191, ArTop10Accuracy=0.6968, over 10278.07 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 08:48:40,887 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,548 INFO [trainer.py:765] (1/8) Epoch 3, batch 500, train_loss[loss=3.143, ArTop10Accuracy=0.7066, over 12162.00 frames. ], tot_loss[loss=3.171, ArTop10Accuracy=0.7005, over 10856.70 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,483 INFO [trainer.py:765] (1/8) Epoch 3, batch 600, train_loss[loss=3.082, ArTop10Accuracy=0.723, over 11733.00 frames. ], tot_loss[loss=3.156, ArTop10Accuracy=0.7034, over 11384.37 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,624 INFO [trainer.py:765] (1/8) Epoch 3, batch 700, train_loss[loss=3.085, ArTop10Accuracy=0.721, over 10002.00 frames. ], tot_loss[loss=3.15, ArTop10Accuracy=0.7044, over 11517.63 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 08:53:57,395 INFO [trainer.py:765] (1/8) Epoch 3, batch 800, train_loss[loss=3.078, ArTop10Accuracy=0.7226, over 10086.00 frames. ], tot_loss[loss=3.142, ArTop10Accuracy=0.7064, over 11639.54 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 08:55:15,124 INFO [trainer.py:765] (1/8) Epoch 3, batch 900, train_loss[loss=3.066, ArTop10Accuracy=0.7258, over 12849.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.7104, over 11684.21 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,564 INFO [trainer.py:765] (1/8) Epoch 3, batch 1000, train_loss[loss=3.051, ArTop10Accuracy=0.7245, over 12855.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.7118, over 11895.25 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,510 INFO [trainer.py:765] (1/8) Epoch 3, batch 1100, train_loss[loss=3.066, ArTop10Accuracy=0.7182, over 13584.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.7134, over 11977.87 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,403 INFO [trainer.py:765] (1/8) Epoch 3, batch 1200, train_loss[loss=3.159, ArTop10Accuracy=0.702, over 11196.00 frames. ], tot_loss[loss=3.095, ArTop10Accuracy=0.715, over 11868.07 frames. ], batch size: 103, lr: 2.54e-02 +2024-08-06 09:00:01,941 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:01:50,745 INFO [trainer.py:765] (1/8) Epoch 4, batch 100, train_loss[loss=3.027, ArTop10Accuracy=0.7318, over 14526.00 frames. ], tot_loss[loss=3.07, ArTop10Accuracy=0.7198, over 4767.61 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,864 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:03:02,383 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,384 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 09:03:03,368 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,277 INFO [trainer.py:765] (1/8) Epoch 4, batch 200, train_loss[loss=2.974, ArTop10Accuracy=0.7396, over 13569.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7232, over 7747.56 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,738 INFO [trainer.py:765] (1/8) Epoch 4, batch 300, train_loss[loss=3.089, ArTop10Accuracy=0.7124, over 14157.00 frames. ], tot_loss[loss=3.041, ArTop10Accuracy=0.7252, over 9375.36 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 09:06:28,155 INFO [trainer.py:765] (1/8) Epoch 4, batch 400, train_loss[loss=2.98, ArTop10Accuracy=0.7353, over 10224.00 frames. ], tot_loss[loss=3.034, ArTop10Accuracy=0.7266, over 10275.30 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 09:08:01,929 INFO [trainer.py:765] (1/8) Epoch 4, batch 500, train_loss[loss=2.934, ArTop10Accuracy=0.7454, over 12306.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.7283, over 10830.06 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,546 INFO [trainer.py:765] (1/8) Epoch 4, batch 600, train_loss[loss=3.117, ArTop10Accuracy=0.7081, over 11433.00 frames. ], tot_loss[loss=3.018, ArTop10Accuracy=0.7296, over 11350.44 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,871 INFO [trainer.py:765] (1/8) Epoch 4, batch 700, train_loss[loss=3.038, ArTop10Accuracy=0.7271, over 10323.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7283, over 11501.78 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,518 INFO [trainer.py:765] (1/8) Epoch 4, batch 800, train_loss[loss=3.003, ArTop10Accuracy=0.7298, over 9342.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7284, over 11621.48 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 09:13:33,218 INFO [trainer.py:765] (1/8) Epoch 4, batch 900, train_loss[loss=3.1, ArTop10Accuracy=0.7141, over 12897.00 frames. ], tot_loss[loss=3.016, ArTop10Accuracy=0.7296, over 11659.32 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,526 INFO [trainer.py:765] (1/8) Epoch 4, batch 1000, train_loss[loss=3.042, ArTop10Accuracy=0.7206, over 12765.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7304, over 11873.50 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,987 INFO [trainer.py:765] (1/8) Epoch 4, batch 1100, train_loss[loss=3.001, ArTop10Accuracy=0.7331, over 13710.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7303, over 11946.58 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,297 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,350 INFO [trainer.py:765] (1/8) Epoch 4, batch 1200, train_loss[loss=3.076, ArTop10Accuracy=0.7165, over 12258.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7307, over 11834.91 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,461 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:20:17,177 INFO [trainer.py:765] (1/8) Epoch 5, batch 100, train_loss[loss=2.968, ArTop10Accuracy=0.7418, over 14499.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7345, over 4753.20 frames. 
], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,300 INFO [trainer.py:765] (1/8) Epoch 5, batch 200, train_loss[loss=3.047, ArTop10Accuracy=0.7255, over 13533.00 frames. ], tot_loss[loss=2.983, ArTop10Accuracy=0.7362, over 7733.64 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,245 INFO [trainer.py:765] (1/8) Epoch 5, batch 300, train_loss[loss=3, ArTop10Accuracy=0.7324, over 14067.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7382, over 9372.02 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,543 INFO [trainer.py:765] (1/8) Epoch 5, batch 400, train_loss[loss=2.851, ArTop10Accuracy=0.7643, over 10191.00 frames. ], tot_loss[loss=2.965, ArTop10Accuracy=0.7392, over 10286.55 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,424 INFO [trainer.py:765] (1/8) Epoch 5, batch 500, train_loss[loss=2.986, ArTop10Accuracy=0.7376, over 12162.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7397, over 10872.84 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,543 INFO [trainer.py:765] (1/8) Epoch 5, batch 600, train_loss[loss=2.903, ArTop10Accuracy=0.7532, over 11931.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7397, over 11399.96 frames. ], batch size: 19, lr: 2.05e-02 +2024-08-06 09:29:21,676 INFO [trainer.py:765] (1/8) Epoch 5, batch 700, train_loss[loss=2.898, ArTop10Accuracy=0.7536, over 9321.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7382, over 11531.87 frames. ], batch size: 11, lr: 2.04e-02 +2024-08-06 09:30:44,699 INFO [trainer.py:765] (1/8) Epoch 5, batch 800, train_loss[loss=3.077, ArTop10Accuracy=0.7202, over 9351.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.738, over 11643.11 frames. ], batch size: 11, lr: 2.03e-02 +2024-08-06 09:31:51,245 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:32:00,761 INFO [trainer.py:811] (1/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,761 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 09:32:01,712 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,557 INFO [trainer.py:765] (1/8) Epoch 5, batch 900, train_loss[loss=2.939, ArTop10Accuracy=0.7484, over 12774.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7404, over 11677.07 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,329 INFO [trainer.py:765] (1/8) Epoch 5, batch 1000, train_loss[loss=3, ArTop10Accuracy=0.7307, over 13125.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7405, over 11870.34 frames. ], batch size: 28, lr: 2.01e-02 +2024-08-06 09:34:42,306 INFO [trainer.py:765] (1/8) Epoch 5, batch 1100, train_loss[loss=2.929, ArTop10Accuracy=0.7503, over 13596.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7399, over 11943.56 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,339 INFO [trainer.py:765] (1/8) Epoch 5, batch 1200, train_loss[loss=3.058, ArTop10Accuracy=0.7186, over 13242.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7399, over 11871.80 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,360 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:38:52,668 INFO [trainer.py:765] (1/8) Epoch 6, batch 100, train_loss[loss=2.962, ArTop10Accuracy=0.7441, over 14187.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7427, over 4761.42 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,840 INFO [trainer.py:765] (1/8) Epoch 6, batch 200, train_loss[loss=2.889, ArTop10Accuracy=0.7544, over 13533.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7452, over 7753.64 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,971 INFO [trainer.py:765] (1/8) Epoch 6, batch 300, train_loss[loss=2.977, ArTop10Accuracy=0.7367, over 14202.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7462, over 9400.74 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 09:43:17,833 INFO [trainer.py:765] (1/8) Epoch 6, batch 400, train_loss[loss=2.822, ArTop10Accuracy=0.7726, over 10521.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.747, over 10303.55 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,134 INFO [trainer.py:765] (1/8) Epoch 6, batch 500, train_loss[loss=2.937, ArTop10Accuracy=0.7418, over 12210.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7481, over 10854.68 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,879 INFO [trainer.py:765] (1/8) Epoch 6, batch 600, train_loss[loss=2.901, ArTop10Accuracy=0.7563, over 11883.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7468, over 11386.09 frames. ], batch size: 19, lr: 1.81e-02 +2024-08-06 09:46:37,225 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,875 INFO [trainer.py:765] (1/8) Epoch 6, batch 700, train_loss[loss=2.899, ArTop10Accuracy=0.7566, over 10038.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7462, over 11537.98 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,961 INFO [trainer.py:765] (1/8) Epoch 6, batch 800, train_loss[loss=2.845, ArTop10Accuracy=0.7569, over 9402.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7455, over 11662.00 frames. ], batch size: 11, lr: 1.79e-02 +2024-08-06 09:50:32,141 INFO [trainer.py:765] (1/8) Epoch 6, batch 900, train_loss[loss=2.907, ArTop10Accuracy=0.748, over 12954.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7466, over 11709.57 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,303 INFO [trainer.py:765] (1/8) Epoch 6, batch 1000, train_loss[loss=2.976, ArTop10Accuracy=0.7375, over 12882.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7461, over 11880.37 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,927 INFO [trainer.py:765] (1/8) Epoch 6, batch 1100, train_loss[loss=2.896, ArTop10Accuracy=0.7569, over 13659.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7462, over 11936.22 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,343 INFO [trainer.py:765] (1/8) Epoch 6, batch 1200, train_loss[loss=3.025, ArTop10Accuracy=0.7304, over 12231.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7461, over 11865.74 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,177 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:57:06,705 INFO [trainer.py:765] (1/8) Epoch 7, batch 100, train_loss[loss=3.002, ArTop10Accuracy=0.7374, over 14385.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7486, over 4745.63 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 09:58:39,429 INFO [trainer.py:765] (1/8) Epoch 7, batch 200, train_loss[loss=2.902, ArTop10Accuracy=0.7502, over 13656.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7515, over 7751.20 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,090 INFO [trainer.py:765] (1/8) Epoch 7, batch 300, train_loss[loss=2.908, ArTop10Accuracy=0.755, over 14034.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7522, over 9344.26 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,514 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 10:00:50,983 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,123 INFO [trainer.py:765] (1/8) Epoch 7, batch 400, train_loss[loss=2.808, ArTop10Accuracy=0.769, over 10485.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7531, over 10278.27 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 10:03:21,463 INFO [trainer.py:765] (1/8) Epoch 7, batch 500, train_loss[loss=2.828, ArTop10Accuracy=0.7612, over 12168.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.754, over 10832.15 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,889 INFO [trainer.py:765] (1/8) Epoch 7, batch 600, train_loss[loss=2.795, ArTop10Accuracy=0.7731, over 11178.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7539, over 11361.58 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 10:06:25,117 INFO [trainer.py:765] (1/8) Epoch 7, batch 700, train_loss[loss=2.795, ArTop10Accuracy=0.766, over 9279.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7525, over 11491.31 frames. ], batch size: 11, lr: 1.60e-02 +2024-08-06 10:07:46,955 INFO [trainer.py:765] (1/8) Epoch 7, batch 800, train_loss[loss=2.803, ArTop10Accuracy=0.7747, over 10359.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7525, over 11632.01 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,828 INFO [trainer.py:765] (1/8) Epoch 7, batch 900, train_loss[loss=2.792, ArTop10Accuracy=0.7744, over 12621.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7532, over 11686.51 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,642 INFO [trainer.py:765] (1/8) Epoch 7, batch 1000, train_loss[loss=2.938, ArTop10Accuracy=0.7412, over 13341.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7527, over 11857.32 frames. ], batch size: 28, lr: 1.58e-02 +2024-08-06 10:11:35,214 INFO [trainer.py:765] (1/8) Epoch 7, batch 1100, train_loss[loss=2.966, ArTop10Accuracy=0.7316, over 13683.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7512, over 11942.87 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,210 INFO [trainer.py:765] (1/8) Epoch 7, batch 1200, train_loss[loss=3.03, ArTop10Accuracy=0.7313, over 12375.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7515, over 11863.93 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,697 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:15:03,607 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,827 INFO [trainer.py:765] (1/8) Epoch 8, batch 100, train_loss[loss=2.922, ArTop10Accuracy=0.7479, over 14244.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.7541, over 4746.16 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,868 INFO [trainer.py:765] (1/8) Epoch 8, batch 200, train_loss[loss=2.894, ArTop10Accuracy=0.7525, over 13881.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7565, over 7754.49 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,904 INFO [trainer.py:765] (1/8) Epoch 8, batch 300, train_loss[loss=2.901, ArTop10Accuracy=0.7532, over 14163.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7571, over 9382.60 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,348 INFO [trainer.py:765] (1/8) Epoch 8, batch 400, train_loss[loss=2.749, ArTop10Accuracy=0.7818, over 10323.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7585, over 10284.68 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 10:21:32,417 INFO [trainer.py:765] (1/8) Epoch 8, batch 500, train_loss[loss=2.842, ArTop10Accuracy=0.7659, over 12006.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7587, over 10864.33 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 10:23:00,980 INFO [trainer.py:765] (1/8) Epoch 8, batch 600, train_loss[loss=2.892, ArTop10Accuracy=0.7495, over 11358.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7579, over 11374.38 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,794 INFO [trainer.py:765] (1/8) Epoch 8, batch 700, train_loss[loss=2.749, ArTop10Accuracy=0.7816, over 10002.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7573, over 11528.07 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:25:56,091 INFO [trainer.py:765] (1/8) Epoch 8, batch 800, train_loss[loss=2.752, ArTop10Accuracy=0.775, over 9339.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7566, over 11620.96 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:27:12,249 INFO [trainer.py:765] (1/8) Epoch 8, batch 900, train_loss[loss=2.869, ArTop10Accuracy=0.7567, over 13035.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7579, over 11677.37 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:28:25,269 INFO [trainer.py:765] (1/8) Epoch 8, batch 1000, train_loss[loss=2.85, ArTop10Accuracy=0.7617, over 12864.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7572, over 11871.35 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,161 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:29:16,830 INFO [trainer.py:811] (1/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 10:29:17,496 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,738 INFO [trainer.py:765] (1/8) Epoch 8, batch 1100, train_loss[loss=2.851, ArTop10Accuracy=0.7608, over 13545.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7554, over 11946.85 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,952 INFO [trainer.py:765] (1/8) Epoch 8, batch 1200, train_loss[loss=3.013, ArTop10Accuracy=0.7242, over 12375.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.756, over 11877.61 frames. ], batch size: 101, lr: 1.40e-02 +2024-08-06 10:32:05,554 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:34:01,262 INFO [trainer.py:765] (1/8) Epoch 9, batch 100, train_loss[loss=2.974, ArTop10Accuracy=0.7336, over 14712.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7598, over 4756.21 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,778 INFO [trainer.py:765] (1/8) Epoch 9, batch 200, train_loss[loss=2.743, ArTop10Accuracy=0.782, over 13812.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7618, over 7742.43 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,933 INFO [trainer.py:765] (1/8) Epoch 9, batch 300, train_loss[loss=2.905, ArTop10Accuracy=0.7537, over 14358.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7627, over 9372.16 frames. ], batch size: 45, lr: 1.31e-02 +2024-08-06 10:38:32,702 INFO [trainer.py:765] (1/8) Epoch 9, batch 400, train_loss[loss=2.77, ArTop10Accuracy=0.7783, over 10809.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7625, over 10272.39 frames. ], batch size: 15, lr: 1.31e-02 +2024-08-06 10:39:59,262 INFO [trainer.py:765] (1/8) Epoch 9, batch 500, train_loss[loss=2.804, ArTop10Accuracy=0.7676, over 12750.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7636, over 10841.66 frames. ], batch size: 23, lr: 1.30e-02 +2024-08-06 10:41:29,694 INFO [trainer.py:765] (1/8) Epoch 9, batch 600, train_loss[loss=2.872, ArTop10Accuracy=0.7589, over 11277.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7634, over 11363.34 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,446 INFO [trainer.py:765] (1/8) Epoch 9, batch 700, train_loss[loss=2.794, ArTop10Accuracy=0.7749, over 10224.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7615, over 11506.27 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:44:02,958 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,675 INFO [trainer.py:765] (1/8) Epoch 9, batch 800, train_loss[loss=2.758, ArTop10Accuracy=0.7845, over 9348.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7607, over 11633.24 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:45:35,725 INFO [trainer.py:765] (1/8) Epoch 9, batch 900, train_loss[loss=2.776, ArTop10Accuracy=0.7739, over 12975.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7616, over 11688.38 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:46:51,277 INFO [trainer.py:765] (1/8) Epoch 9, batch 1000, train_loss[loss=2.904, ArTop10Accuracy=0.7464, over 12858.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7605, over 11884.24 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,253 INFO [trainer.py:765] (1/8) Epoch 9, batch 1100, train_loss[loss=2.846, ArTop10Accuracy=0.7651, over 13599.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7598, over 11944.83 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,058 INFO [trainer.py:765] (1/8) Epoch 9, batch 1200, train_loss[loss=2.94, ArTop10Accuracy=0.7412, over 12285.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7606, over 11849.65 frames. ], batch size: 103, lr: 1.27e-02 +2024-08-06 10:50:22,395 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:52:12,332 INFO [trainer.py:765] (1/8) Epoch 10, batch 100, train_loss[loss=2.84, ArTop10Accuracy=0.7599, over 14454.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7628, over 4762.71 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,591 INFO [trainer.py:765] (1/8) Epoch 10, batch 200, train_loss[loss=2.841, ArTop10Accuracy=0.7632, over 13776.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7643, over 7751.20 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,096 INFO [trainer.py:765] (1/8) Epoch 10, batch 300, train_loss[loss=2.908, ArTop10Accuracy=0.7502, over 13872.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.765, over 9380.76 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,181 INFO [trainer.py:765] (1/8) Epoch 10, batch 400, train_loss[loss=2.775, ArTop10Accuracy=0.7748, over 10197.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7658, over 10286.84 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 10:58:04,944 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:58:14,557 INFO [trainer.py:811] (1/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,557 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 10:58:15,576 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,583 INFO [trainer.py:765] (1/8) Epoch 10, batch 500, train_loss[loss=2.86, ArTop10Accuracy=0.7601, over 12069.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7664, over 10851.85 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,821 INFO [trainer.py:765] (1/8) Epoch 10, batch 600, train_loss[loss=2.747, ArTop10Accuracy=0.7847, over 11316.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7663, over 11362.02 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,113 INFO [trainer.py:765] (1/8) Epoch 10, batch 700, train_loss[loss=2.815, ArTop10Accuracy=0.7639, over 10041.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7651, over 11512.01 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 11:02:36,923 INFO [trainer.py:765] (1/8) Epoch 10, batch 800, train_loss[loss=2.721, ArTop10Accuracy=0.7851, over 9291.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7648, over 11633.61 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 11:03:51,218 INFO [trainer.py:765] (1/8) Epoch 10, batch 900, train_loss[loss=2.86, ArTop10Accuracy=0.7579, over 12807.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7657, over 11681.50 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,358 INFO [trainer.py:765] (1/8) Epoch 10, batch 1000, train_loss[loss=2.904, ArTop10Accuracy=0.7478, over 12924.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7645, over 11872.55 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:06:21,725 INFO [trainer.py:765] (1/8) Epoch 10, batch 1100, train_loss[loss=2.817, ArTop10Accuracy=0.7649, over 13518.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.763, over 11926.81 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,778 INFO [trainer.py:765] (1/8) Epoch 10, batch 1200, train_loss[loss=2.922, ArTop10Accuracy=0.7471, over 12081.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7634, over 11849.57 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,905 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:10:29,960 INFO [trainer.py:765] (1/8) Epoch 11, batch 100, train_loss[loss=2.921, ArTop10Accuracy=0.7486, over 14277.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7666, over 4744.36 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,679 INFO [trainer.py:765] (1/8) Epoch 11, batch 200, train_loss[loss=2.759, ArTop10Accuracy=0.7787, over 13740.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7673, over 7726.69 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 11:12:22,831 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,551 INFO [trainer.py:765] (1/8) Epoch 11, batch 300, train_loss[loss=2.949, ArTop10Accuracy=0.739, over 14226.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7683, over 9344.10 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,275 INFO [trainer.py:765] (1/8) Epoch 11, batch 400, train_loss[loss=2.758, ArTop10Accuracy=0.7821, over 11040.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7688, over 10269.43 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 11:16:29,642 INFO [trainer.py:765] (1/8) Epoch 11, batch 500, train_loss[loss=2.739, ArTop10Accuracy=0.7819, over 12315.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7704, over 10842.51 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,523 INFO [trainer.py:765] (1/8) Epoch 11, batch 600, train_loss[loss=2.793, ArTop10Accuracy=0.7793, over 11529.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7689, over 11373.43 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,516 INFO [trainer.py:765] (1/8) Epoch 11, batch 700, train_loss[loss=2.696, ArTop10Accuracy=0.7903, over 9396.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.768, over 11509.70 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 11:20:55,487 INFO [trainer.py:765] (1/8) Epoch 11, batch 800, train_loss[loss=2.761, ArTop10Accuracy=0.7746, over 10377.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7673, over 11641.00 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,712 INFO [trainer.py:765] (1/8) Epoch 11, batch 900, train_loss[loss=2.919, ArTop10Accuracy=0.7417, over 13068.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7683, over 11682.77 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,805 INFO [trainer.py:765] (1/8) Epoch 11, batch 1000, train_loss[loss=2.768, ArTop10Accuracy=0.7785, over 12609.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.768, over 11877.30 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,908 INFO [trainer.py:765] (1/8) Epoch 11, batch 1100, train_loss[loss=2.88, ArTop10Accuracy=0.7552, over 13635.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.767, over 11958.09 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,739 INFO [trainer.py:765] (1/8) Epoch 11, batch 1200, train_loss[loss=2.993, ArTop10Accuracy=0.7277, over 12222.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7665, over 11887.81 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,853 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 31570MB +2024-08-06 11:26:26,191 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,715 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:29:03,456 INFO [trainer.py:765] (1/8) Epoch 12, batch 100, train_loss[loss=2.881, ArTop10Accuracy=0.7566, over 14679.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.769, over 4772.54 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,679 INFO [trainer.py:765] (1/8) Epoch 12, batch 200, train_loss[loss=2.779, ArTop10Accuracy=0.7758, over 13632.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.7707, over 7752.01 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,661 INFO [trainer.py:765] (1/8) Epoch 12, batch 300, train_loss[loss=2.833, ArTop10Accuracy=0.7634, over 14193.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7732, over 9379.82 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 11:33:30,744 INFO [trainer.py:765] (1/8) Epoch 12, batch 400, train_loss[loss=2.727, ArTop10Accuracy=0.7805, over 10134.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7731, over 10292.35 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,737 INFO [trainer.py:765] (1/8) Epoch 12, batch 500, train_loss[loss=2.804, ArTop10Accuracy=0.7718, over 11982.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7739, over 10839.09 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,367 INFO [trainer.py:765] (1/8) Epoch 12, batch 600, train_loss[loss=2.87, ArTop10Accuracy=0.7539, over 11367.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7727, over 11335.89 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,349 INFO [trainer.py:765] (1/8) Epoch 12, batch 700, train_loss[loss=2.716, ArTop10Accuracy=0.7844, over 10320.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7717, over 11503.62 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,617 INFO [trainer.py:765] (1/8) Epoch 12, batch 800, train_loss[loss=2.846, ArTop10Accuracy=0.7584, over 10128.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.7709, over 11629.35 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,895 INFO [trainer.py:765] (1/8) Epoch 12, batch 900, train_loss[loss=2.831, ArTop10Accuracy=0.7651, over 12711.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.772, over 11677.66 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:13,999 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,195 INFO [trainer.py:765] (1/8) Epoch 12, batch 1000, train_loss[loss=2.809, ArTop10Accuracy=0.7676, over 12882.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7709, over 11898.86 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,326 INFO [trainer.py:765] (1/8) Epoch 12, batch 1100, train_loss[loss=2.782, ArTop10Accuracy=0.7774, over 13779.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 11949.96 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,162 INFO [trainer.py:765] (1/8) Epoch 12, batch 1200, train_loss[loss=2.944, ArTop10Accuracy=0.7393, over 12054.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7701, over 11864.81 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,869 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:47:26,604 INFO [trainer.py:765] (1/8) Epoch 13, batch 100, train_loss[loss=2.844, ArTop10Accuracy=0.7621, over 14415.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7719, over 4772.92 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,785 INFO [trainer.py:765] (1/8) Epoch 13, batch 200, train_loss[loss=2.752, ArTop10Accuracy=0.7792, over 13590.00 frames. ], tot_loss[loss=2.782, ArTop10Accuracy=0.7732, over 7754.83 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,521 INFO [trainer.py:765] (1/8) Epoch 13, batch 300, train_loss[loss=2.766, ArTop10Accuracy=0.7763, over 14256.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7748, over 9373.21 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,771 INFO [trainer.py:765] (1/8) Epoch 13, batch 400, train_loss[loss=2.624, ArTop10Accuracy=0.8076, over 10278.00 frames. ], tot_loss[loss=2.773, ArTop10Accuracy=0.7757, over 10275.22 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,412 INFO [trainer.py:765] (1/8) Epoch 13, batch 500, train_loss[loss=2.676, ArTop10Accuracy=0.798, over 12201.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7763, over 10857.58 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,229 INFO [trainer.py:765] (1/8) Epoch 13, batch 600, train_loss[loss=2.698, ArTop10Accuracy=0.7882, over 11418.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7749, over 11355.82 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,086 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:55:56,834 INFO [trainer.py:811] (1/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 33972MB +2024-08-06 11:55:57,718 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,471 INFO [trainer.py:765] (1/8) Epoch 13, batch 700, train_loss[loss=2.772, ArTop10Accuracy=0.7774, over 9309.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.7735, over 11488.70 frames. ], batch size: 11, lr: 9.20e-03 +2024-08-06 11:57:46,690 INFO [trainer.py:765] (1/8) Epoch 13, batch 800, train_loss[loss=2.713, ArTop10Accuracy=0.7875, over 10161.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.773, over 11619.65 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,290 INFO [trainer.py:765] (1/8) Epoch 13, batch 900, train_loss[loss=2.756, ArTop10Accuracy=0.7807, over 13041.00 frames. ], tot_loss[loss=2.782, ArTop10Accuracy=0.7737, over 11686.03 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,180 INFO [trainer.py:765] (1/8) Epoch 13, batch 1000, train_loss[loss=2.81, ArTop10Accuracy=0.7706, over 12786.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.773, over 11890.74 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,885 INFO [trainer.py:765] (1/8) Epoch 13, batch 1100, train_loss[loss=2.775, ArTop10Accuracy=0.7717, over 13575.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7709, over 11954.47 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,669 INFO [trainer.py:765] (1/8) Epoch 13, batch 1200, train_loss[loss=2.981, ArTop10Accuracy=0.7357, over 12087.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7714, over 11877.57 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,616 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:05:45,337 INFO [trainer.py:765] (1/8) Epoch 14, batch 100, train_loss[loss=2.83, ArTop10Accuracy=0.766, over 14226.00 frames. ], tot_loss[loss=2.772, ArTop10Accuracy=0.7751, over 4764.55 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,607 INFO [trainer.py:765] (1/8) Epoch 14, batch 200, train_loss[loss=2.838, ArTop10Accuracy=0.7595, over 13674.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7773, over 7749.81 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,317 INFO [trainer.py:765] (1/8) Epoch 14, batch 300, train_loss[loss=2.832, ArTop10Accuracy=0.7616, over 14316.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.777, over 9372.28 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,135 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,232 INFO [trainer.py:765] (1/8) Epoch 14, batch 400, train_loss[loss=2.79, ArTop10Accuracy=0.7714, over 10845.00 frames. ], tot_loss[loss=2.761, ArTop10Accuracy=0.7776, over 10295.62 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 12:11:36,157 INFO [trainer.py:765] (1/8) Epoch 14, batch 500, train_loss[loss=2.754, ArTop10Accuracy=0.7758, over 11991.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.778, over 10856.86 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:06,000 INFO [trainer.py:765] (1/8) Epoch 14, batch 600, train_loss[loss=2.779, ArTop10Accuracy=0.7749, over 11514.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7771, over 11376.97 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,559 INFO [trainer.py:765] (1/8) Epoch 14, batch 700, train_loss[loss=2.725, ArTop10Accuracy=0.7884, over 9402.00 frames. ], tot_loss[loss=2.768, ArTop10Accuracy=0.776, over 11521.69 frames. ], batch size: 11, lr: 8.57e-03 +2024-08-06 12:15:58,076 INFO [trainer.py:765] (1/8) Epoch 14, batch 800, train_loss[loss=2.679, ArTop10Accuracy=0.795, over 9294.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7755, over 11635.91 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 12:17:12,872 INFO [trainer.py:765] (1/8) Epoch 14, batch 900, train_loss[loss=2.759, ArTop10Accuracy=0.7819, over 13014.00 frames. ], tot_loss[loss=2.768, ArTop10Accuracy=0.7764, over 11670.90 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,618 INFO [trainer.py:765] (1/8) Epoch 14, batch 1000, train_loss[loss=2.777, ArTop10Accuracy=0.7752, over 13023.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7758, over 11869.41 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,382 INFO [trainer.py:765] (1/8) Epoch 14, batch 1100, train_loss[loss=2.772, ArTop10Accuracy=0.7752, over 13365.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7738, over 11956.46 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,284 INFO [trainer.py:765] (1/8) Epoch 14, batch 1200, train_loss[loss=2.913, ArTop10Accuracy=0.749, over 11832.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7742, over 11850.00 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:57,643 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:23:51,968 INFO [trainer.py:765] (1/8) Epoch 15, batch 100, train_loss[loss=2.838, ArTop10Accuracy=0.7632, over 14451.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7777, over 4747.89 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,605 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (1/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,290 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 33972MB +2024-08-06 12:24:11,100 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,992 INFO [trainer.py:765] (1/8) Epoch 15, batch 200, train_loss[loss=2.771, ArTop10Accuracy=0.7773, over 13839.00 frames. ], tot_loss[loss=2.76, ArTop10Accuracy=0.7774, over 7753.74 frames. ], batch size: 35, lr: 8.12e-03 +2024-08-06 12:26:58,701 INFO [trainer.py:765] (1/8) Epoch 15, batch 300, train_loss[loss=2.794, ArTop10Accuracy=0.7749, over 13956.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7783, over 9373.07 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,538 INFO [trainer.py:765] (1/8) Epoch 15, batch 400, train_loss[loss=2.758, ArTop10Accuracy=0.777, over 10365.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7793, over 10266.45 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,036 INFO [trainer.py:765] (1/8) Epoch 15, batch 500, train_loss[loss=2.73, ArTop10Accuracy=0.7861, over 12609.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7801, over 10847.41 frames. ], batch size: 23, lr: 8.05e-03 +2024-08-06 12:31:23,297 INFO [trainer.py:765] (1/8) Epoch 15, batch 600, train_loss[loss=2.736, ArTop10Accuracy=0.7852, over 11271.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7805, over 11369.05 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,180 INFO [trainer.py:765] (1/8) Epoch 15, batch 700, train_loss[loss=2.627, ArTop10Accuracy=0.8074, over 10299.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7793, over 11529.74 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 12:34:18,260 INFO [trainer.py:765] (1/8) Epoch 15, batch 800, train_loss[loss=2.647, ArTop10Accuracy=0.8006, over 9471.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 11626.40 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 12:35:34,733 INFO [trainer.py:765] (1/8) Epoch 15, batch 900, train_loss[loss=2.757, ArTop10Accuracy=0.7784, over 12828.00 frames. ], tot_loss[loss=2.753, ArTop10Accuracy=0.7794, over 11678.28 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,547 INFO [trainer.py:765] (1/8) Epoch 15, batch 1000, train_loss[loss=2.76, ArTop10Accuracy=0.7799, over 12825.00 frames. ], tot_loss[loss=2.76, ArTop10Accuracy=0.7781, over 11871.63 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,183 INFO [trainer.py:765] (1/8) Epoch 15, batch 1100, train_loss[loss=2.717, ArTop10Accuracy=0.785, over 13494.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7765, over 11950.15 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,847 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,795 INFO [trainer.py:765] (1/8) Epoch 15, batch 1200, train_loss[loss=2.892, ArTop10Accuracy=0.7571, over 12477.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7762, over 11866.65 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,961 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:42:17,623 INFO [trainer.py:765] (1/8) Epoch 16, batch 100, train_loss[loss=2.815, ArTop10Accuracy=0.765, over 14853.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.781, over 4763.41 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,569 INFO [trainer.py:765] (1/8) Epoch 16, batch 200, train_loss[loss=2.731, ArTop10Accuracy=0.7824, over 13905.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7817, over 7738.07 frames. ], batch size: 35, lr: 7.61e-03 +2024-08-06 12:45:18,507 INFO [trainer.py:765] (1/8) Epoch 16, batch 300, train_loss[loss=2.766, ArTop10Accuracy=0.7766, over 14256.00 frames. ], tot_loss[loss=2.735, ArTop10Accuracy=0.7825, over 9366.92 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,212 INFO [trainer.py:765] (1/8) Epoch 16, batch 400, train_loss[loss=2.789, ArTop10Accuracy=0.7706, over 10821.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7829, over 10278.07 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 12:48:16,316 INFO [trainer.py:765] (1/8) Epoch 16, batch 500, train_loss[loss=2.804, ArTop10Accuracy=0.771, over 12237.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7833, over 10829.44 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,648 INFO [trainer.py:765] (1/8) Epoch 16, batch 600, train_loss[loss=2.69, ArTop10Accuracy=0.794, over 11436.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7824, over 11359.23 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,687 INFO [trainer.py:765] (1/8) Epoch 16, batch 700, train_loss[loss=2.575, ArTop10Accuracy=0.8169, over 10113.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7824, over 11499.13 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 12:52:43,507 INFO [trainer.py:765] (1/8) Epoch 16, batch 800, train_loss[loss=2.739, ArTop10Accuracy=0.7811, over 9315.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7812, over 11618.42 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,020 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:53:15,496 INFO [trainer.py:811] (1/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,496 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 33972MB +2024-08-06 12:53:16,191 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,487 INFO [trainer.py:765] (1/8) Epoch 16, batch 900, train_loss[loss=2.656, ArTop10Accuracy=0.7962, over 12948.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7822, over 11658.62 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,797 INFO [trainer.py:765] (1/8) Epoch 16, batch 1000, train_loss[loss=2.713, ArTop10Accuracy=0.7918, over 12969.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7808, over 11870.63 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,168 INFO [trainer.py:765] (1/8) Epoch 16, batch 1100, train_loss[loss=2.718, ArTop10Accuracy=0.7869, over 13368.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7786, over 11929.12 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,491 INFO [trainer.py:765] (1/8) Epoch 16, batch 1200, train_loss[loss=2.885, ArTop10Accuracy=0.7546, over 12060.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7789, over 11851.65 frames. ], batch size: 101, lr: 7.44e-03 +2024-08-06 12:58:48,499 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:00:47,904 INFO [trainer.py:765] (1/8) Epoch 17, batch 100, train_loss[loss=2.817, ArTop10Accuracy=0.7671, over 14796.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7833, over 4759.58 frames. 
], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,308 INFO [trainer.py:765] (1/8) Epoch 17, batch 200, train_loss[loss=2.825, ArTop10Accuracy=0.7651, over 13587.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7833, over 7743.12 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,523 INFO [trainer.py:765] (1/8) Epoch 17, batch 300, train_loss[loss=2.757, ArTop10Accuracy=0.7768, over 14088.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7844, over 9362.71 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,767 INFO [trainer.py:765] (1/8) Epoch 17, batch 400, train_loss[loss=2.723, ArTop10Accuracy=0.7856, over 10410.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7844, over 10281.41 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,027 INFO [trainer.py:765] (1/8) Epoch 17, batch 500, train_loss[loss=2.711, ArTop10Accuracy=0.7892, over 12222.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7845, over 10840.39 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,882 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,694 INFO [trainer.py:765] (1/8) Epoch 17, batch 600, train_loss[loss=2.759, ArTop10Accuracy=0.779, over 11331.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7837, over 11352.01 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,842 INFO [trainer.py:765] (1/8) Epoch 17, batch 700, train_loss[loss=2.614, ArTop10Accuracy=0.8014, over 10098.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7825, over 11507.09 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,487 INFO [trainer.py:765] (1/8) Epoch 17, batch 800, train_loss[loss=2.756, ArTop10Accuracy=0.7715, over 10443.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7816, over 11651.32 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 13:12:35,676 INFO [trainer.py:765] (1/8) Epoch 17, batch 900, train_loss[loss=2.768, ArTop10Accuracy=0.7783, over 12885.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7827, over 11681.22 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,068 INFO [trainer.py:765] (1/8) Epoch 17, batch 1000, train_loss[loss=2.674, ArTop10Accuracy=0.7989, over 12906.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.782, over 11879.24 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,490 INFO [trainer.py:765] (1/8) Epoch 17, batch 1100, train_loss[loss=2.717, ArTop10Accuracy=0.7841, over 13662.00 frames. ], tot_loss[loss=2.745, ArTop10Accuracy=0.7807, over 11971.73 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,394 INFO [trainer.py:765] (1/8) Epoch 17, batch 1200, train_loss[loss=2.911, ArTop10Accuracy=0.7513, over 11856.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7802, over 11884.24 frames. ], batch size: 103, lr: 7.01e-03 +2024-08-06 13:17:21,749 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:19:16,000 INFO [trainer.py:765] (1/8) Epoch 18, batch 100, train_loss[loss=2.769, ArTop10Accuracy=0.7795, over 14613.00 frames. ], tot_loss[loss=2.722, ArTop10Accuracy=0.7849, over 4769.80 frames. ], batch size: 63, lr: 6.78e-03 +2024-08-06 13:20:46,604 INFO [trainer.py:765] (1/8) Epoch 18, batch 200, train_loss[loss=2.692, ArTop10Accuracy=0.7902, over 13776.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7853, over 7773.05 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,111 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 33972MB +2024-08-06 13:22:05,479 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,587 INFO [trainer.py:765] (1/8) Epoch 18, batch 300, train_loss[loss=2.765, ArTop10Accuracy=0.7781, over 14328.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7859, over 9385.66 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,935 INFO [trainer.py:765] (1/8) Epoch 18, batch 400, train_loss[loss=2.545, ArTop10Accuracy=0.8139, over 10287.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7857, over 10291.69 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,019 INFO [trainer.py:765] (1/8) Epoch 18, batch 500, train_loss[loss=2.681, ArTop10Accuracy=0.7954, over 12234.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7866, over 10834.26 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,640 INFO [trainer.py:765] (1/8) Epoch 18, batch 600, train_loss[loss=2.674, ArTop10Accuracy=0.794, over 11292.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7857, over 11355.11 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,588 INFO [trainer.py:765] (1/8) Epoch 18, batch 700, train_loss[loss=2.683, ArTop10Accuracy=0.7953, over 10089.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7849, over 11511.68 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,989 INFO [trainer.py:765] (1/8) Epoch 18, batch 800, train_loss[loss=2.677, ArTop10Accuracy=0.7978, over 10206.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11652.90 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,525 INFO [trainer.py:765] (1/8) Epoch 18, batch 900, train_loss[loss=2.734, ArTop10Accuracy=0.7881, over 12990.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7853, over 11691.99 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,557 INFO [trainer.py:765] (1/8) Epoch 18, batch 1000, train_loss[loss=2.804, ArTop10Accuracy=0.77, over 12660.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7841, over 11895.92 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,503 INFO [trainer.py:765] (1/8) Epoch 18, batch 1100, train_loss[loss=2.752, ArTop10Accuracy=0.7808, over 13716.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7824, over 11960.66 frames. ], batch size: 35, lr: 6.64e-03 +2024-08-06 13:34:54,680 INFO [trainer.py:765] (1/8) Epoch 18, batch 1200, train_loss[loss=2.896, ArTop10Accuracy=0.753, over 12399.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7821, over 11856.69 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,070 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,948 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:37:48,630 INFO [trainer.py:765] (1/8) Epoch 19, batch 100, train_loss[loss=2.699, ArTop10Accuracy=0.7911, over 14262.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7858, over 4758.72 frames. 
], batch size: 63, lr: 6.43e-03 +2024-08-06 13:39:23,263 INFO [trainer.py:765] (1/8) Epoch 19, batch 200, train_loss[loss=2.776, ArTop10Accuracy=0.7724, over 13521.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7868, over 7762.49 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,365 INFO [trainer.py:765] (1/8) Epoch 19, batch 300, train_loss[loss=2.801, ArTop10Accuracy=0.768, over 13980.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7867, over 9377.16 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 13:42:21,073 INFO [trainer.py:765] (1/8) Epoch 19, batch 400, train_loss[loss=2.678, ArTop10Accuracy=0.7925, over 10728.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7881, over 10287.82 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 13:43:44,961 INFO [trainer.py:765] (1/8) Epoch 19, batch 500, train_loss[loss=2.629, ArTop10Accuracy=0.8012, over 12252.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7885, over 10839.43 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,687 INFO [trainer.py:765] (1/8) Epoch 19, batch 600, train_loss[loss=2.795, ArTop10Accuracy=0.7709, over 11283.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7882, over 11371.27 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,328 INFO [trainer.py:765] (1/8) Epoch 19, batch 700, train_loss[loss=2.647, ArTop10Accuracy=0.7986, over 10188.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7867, over 11507.32 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,890 INFO [trainer.py:765] (1/8) Epoch 19, batch 800, train_loss[loss=2.643, ArTop10Accuracy=0.8003, over 10218.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7864, over 11619.30 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,263 INFO [trainer.py:765] (1/8) Epoch 19, batch 900, train_loss[loss=2.758, ArTop10Accuracy=0.777, over 13005.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7869, over 11663.49 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,658 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:50:50,535 INFO [trainer.py:811] (1/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,536 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 33972MB +2024-08-06 13:50:51,493 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,923 INFO [trainer.py:765] (1/8) Epoch 19, batch 1000, train_loss[loss=2.812, ArTop10Accuracy=0.7639, over 12567.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7857, over 11873.45 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,273 INFO [trainer.py:765] (1/8) Epoch 19, batch 1100, train_loss[loss=2.733, ArTop10Accuracy=0.7851, over 13866.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7839, over 11972.86 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,319 INFO [trainer.py:765] (1/8) Epoch 19, batch 1200, train_loss[loss=2.845, ArTop10Accuracy=0.7572, over 12054.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 11871.82 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:21,985 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:56:12,911 INFO [trainer.py:765] (1/8) Epoch 20, batch 100, train_loss[loss=2.773, ArTop10Accuracy=0.7772, over 14652.00 frames. ], tot_loss[loss=2.711, ArTop10Accuracy=0.7862, over 4761.24 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,499 INFO [trainer.py:765] (1/8) Epoch 20, batch 200, train_loss[loss=2.695, ArTop10Accuracy=0.7874, over 13536.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7882, over 7748.16 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,436 INFO [trainer.py:765] (1/8) Epoch 20, batch 300, train_loss[loss=2.737, ArTop10Accuracy=0.779, over 14100.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.7891, over 9381.75 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,362 INFO [trainer.py:765] (1/8) Epoch 20, batch 400, train_loss[loss=2.589, ArTop10Accuracy=0.8097, over 10383.00 frames. ], tot_loss[loss=2.698, ArTop10Accuracy=0.7892, over 10302.26 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,860 INFO [trainer.py:765] (1/8) Epoch 20, batch 500, train_loss[loss=2.766, ArTop10Accuracy=0.7745, over 12717.00 frames. ], tot_loss[loss=2.697, ArTop10Accuracy=0.7896, over 10858.59 frames. ], batch size: 23, lr: 6.06e-03 +2024-08-06 14:03:40,860 INFO [trainer.py:765] (1/8) Epoch 20, batch 600, train_loss[loss=2.662, ArTop10Accuracy=0.7981, over 11499.00 frames. ], tot_loss[loss=2.699, ArTop10Accuracy=0.7891, over 11367.87 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,872 INFO [trainer.py:765] (1/8) Epoch 20, batch 700, train_loss[loss=2.667, ArTop10Accuracy=0.7949, over 10287.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7879, over 11530.59 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 14:05:30,795 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,515 INFO [trainer.py:765] (1/8) Epoch 20, batch 800, train_loss[loss=2.604, ArTop10Accuracy=0.8108, over 9273.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7865, over 11640.77 frames. ], batch size: 11, lr: 6.02e-03 +2024-08-06 14:07:50,950 INFO [trainer.py:765] (1/8) Epoch 20, batch 900, train_loss[loss=2.748, ArTop10Accuracy=0.7846, over 12843.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7883, over 11683.45 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,180 INFO [trainer.py:765] (1/8) Epoch 20, batch 1000, train_loss[loss=2.718, ArTop10Accuracy=0.7892, over 12714.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7881, over 11882.84 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,216 INFO [trainer.py:765] (1/8) Epoch 20, batch 1100, train_loss[loss=2.752, ArTop10Accuracy=0.7788, over 14235.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7871, over 11959.70 frames. ], batch size: 35, lr: 5.99e-03 +2024-08-06 14:11:37,820 INFO [trainer.py:765] (1/8) Epoch 20, batch 1200, train_loss[loss=2.856, ArTop10Accuracy=0.758, over 12105.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7866, over 11876.99 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:36,847 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 14:12:36,850 INFO [trainer.py:1069] (1/8) Done! 
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-2 b/libritts-r/log/log-train-2024-08-06-08-06-14-2 new file mode 100644 index 0000000000000000000000000000000000000000..9393abdb10b36d538a7f25eed1f779cb25f5f675 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-2 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,316 INFO [trainer.py:870] (2/8) Training started +2024-08-06 08:06:14,317 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 08:06:14,317 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,317 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 08:06:15,078 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,215 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 08:06:19,151 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 08:06:19,153 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 08:06:19,155 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 08:06:19,155 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 08:06:19,156 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,769 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 08:06:19,770 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 08:06:20,096 INFO [datamodule.py:388] (2/8) About to create dev dataloader +2024-08-06 
08:08:02,122 INFO [trainer.py:765] (2/8) Epoch 1, batch 100, train_loss[loss=4.363, ArTop10Accuracy=0.494, over 14232.00 frames. ], tot_loss[loss=5.052, ArTop10Accuracy=0.3739, over 4752.73 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,827 INFO [trainer.py:765] (2/8) Epoch 1, batch 200, train_loss[loss=3.997, ArTop10Accuracy=0.554, over 13785.00 frames. ], tot_loss[loss=4.487, ArTop10Accuracy=0.4685, over 7750.12 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,428 INFO [trainer.py:765] (2/8) Epoch 1, batch 300, train_loss[loss=3.878, ArTop10Accuracy=0.5701, over 14358.00 frames. ], tot_loss[loss=4.217, ArTop10Accuracy=0.5127, over 9382.60 frames. ], batch size: 45, lr: 3.00e-02 +2024-08-06 08:12:12,699 INFO [trainer.py:765] (2/8) Epoch 1, batch 400, train_loss[loss=3.715, ArTop10Accuracy=0.6059, over 10305.00 frames. ], tot_loss[loss=4.027, ArTop10Accuracy=0.5453, over 10290.17 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 08:13:40,050 INFO [trainer.py:765] (2/8) Epoch 1, batch 500, train_loss[loss=3.654, ArTop10Accuracy=0.6128, over 12216.00 frames. ], tot_loss[loss=3.879, ArTop10Accuracy=0.5711, over 10857.77 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,245 INFO [trainer.py:765] (2/8) Epoch 1, batch 600, train_loss[loss=3.641, ArTop10Accuracy=0.6103, over 11523.00 frames. ], tot_loss[loss=3.768, ArTop10Accuracy=0.5906, over 11362.99 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,425 INFO [trainer.py:765] (2/8) Epoch 1, batch 700, train_loss[loss=3.477, ArTop10Accuracy=0.6423, over 10293.00 frames. ], tot_loss[loss=3.69, ArTop10Accuracy=0.6047, over 11525.39 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,020 INFO [trainer.py:765] (2/8) Epoch 1, batch 800, train_loss[loss=3.488, ArTop10Accuracy=0.6468, over 10005.00 frames. ], tot_loss[loss=3.627, ArTop10Accuracy=0.6161, over 11644.65 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 08:18:56,151 INFO [trainer.py:765] (2/8) Epoch 1, batch 900, train_loss[loss=3.489, ArTop10Accuracy=0.6433, over 13104.00 frames. ], tot_loss[loss=3.569, ArTop10Accuracy=0.6268, over 11684.52 frames. ], batch size: 28, lr: 2.98e-02 +2024-08-06 08:20:12,863 INFO [trainer.py:765] (2/8) Epoch 1, batch 1000, train_loss[loss=3.458, ArTop10Accuracy=0.6483, over 12945.00 frames. ], tot_loss[loss=3.525, ArTop10Accuracy=0.6349, over 11858.22 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,539 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,155 INFO [trainer.py:765] (2/8) Epoch 1, batch 1100, train_loss[loss=3.455, ArTop10Accuracy=0.6509, over 13734.00 frames. ], tot_loss[loss=3.489, ArTop10Accuracy=0.6414, over 11951.02 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 08:22:45,412 INFO [trainer.py:765] (2/8) Epoch 1, batch 1200, train_loss[loss=3.438, ArTop10Accuracy=0.6536, over 12531.00 frames. ], tot_loss[loss=3.463, ArTop10Accuracy=0.6462, over 11868.08 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,264 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:25:36,238 INFO [trainer.py:765] (2/8) Epoch 2, batch 100, train_loss[loss=3.4, ArTop10Accuracy=0.6547, over 14385.00 frames. ], tot_loss[loss=3.411, ArTop10Accuracy=0.6553, over 4753.26 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,956 INFO [trainer.py:765] (2/8) Epoch 2, batch 200, train_loss[loss=3.384, ArTop10Accuracy=0.6598, over 13692.00 frames. ], tot_loss[loss=3.387, ArTop10Accuracy=0.6599, over 7750.66 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,533 INFO [trainer.py:765] (2/8) Epoch 2, batch 300, train_loss[loss=3.337, ArTop10Accuracy=0.6695, over 14655.00 frames. ], tot_loss[loss=3.368, ArTop10Accuracy=0.6638, over 9401.76 frames. ], batch size: 45, lr: 2.89e-02 +2024-08-06 08:29:48,637 INFO [trainer.py:765] (2/8) Epoch 2, batch 400, train_loss[loss=3.286, ArTop10Accuracy=0.6797, over 10335.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.6663, over 10286.49 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 08:31:22,900 INFO [trainer.py:765] (2/8) Epoch 2, batch 500, train_loss[loss=3.357, ArTop10Accuracy=0.6681, over 12267.00 frames. ], tot_loss[loss=3.345, ArTop10Accuracy=0.6679, over 10860.51 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 08:32:45,689 INFO [trainer.py:765] (2/8) Epoch 2, batch 600, train_loss[loss=3.376, ArTop10Accuracy=0.6645, over 11415.00 frames. ], tot_loss[loss=3.334, ArTop10Accuracy=0.6704, over 11391.79 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,581 INFO [trainer.py:765] (2/8) Epoch 2, batch 700, train_loss[loss=3.356, ArTop10Accuracy=0.6722, over 9474.00 frames. ], tot_loss[loss=3.329, ArTop10Accuracy=0.6711, over 11540.34 frames. ], batch size: 11, lr: 2.85e-02 +2024-08-06 08:34:31,173 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:34:40,887 INFO [trainer.py:811] (2/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,888 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 08:34:41,700 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,878 INFO [trainer.py:765] (2/8) Epoch 2, batch 800, train_loss[loss=3.26, ArTop10Accuracy=0.6839, over 10119.00 frames. ], tot_loss[loss=3.322, ArTop10Accuracy=0.6726, over 11672.83 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 08:36:56,371 INFO [trainer.py:765] (2/8) Epoch 2, batch 900, train_loss[loss=3.232, ArTop10Accuracy=0.6876, over 13044.00 frames. ], tot_loss[loss=3.31, ArTop10Accuracy=0.6749, over 11707.63 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,512 INFO [trainer.py:765] (2/8) Epoch 2, batch 1000, train_loss[loss=3.22, ArTop10Accuracy=0.6934, over 12762.00 frames. ], tot_loss[loss=3.302, ArTop10Accuracy=0.6761, over 11890.52 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,060 INFO [trainer.py:765] (2/8) Epoch 2, batch 1100, train_loss[loss=3.299, ArTop10Accuracy=0.6775, over 13686.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6772, over 11940.66 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,220 INFO [trainer.py:765] (2/8) Epoch 2, batch 1200, train_loss[loss=3.305, ArTop10Accuracy=0.6728, over 12009.00 frames. ], tot_loss[loss=3.285, ArTop10Accuracy=0.6794, over 11860.80 frames. ], batch size: 103, lr: 2.80e-02 +2024-08-06 08:41:38,236 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:43:36,650 INFO [trainer.py:765] (2/8) Epoch 3, batch 100, train_loss[loss=3.299, ArTop10Accuracy=0.6755, over 14610.00 frames. ], tot_loss[loss=3.25, ArTop10Accuracy=0.6855, over 4768.32 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,499 INFO [trainer.py:765] (2/8) Epoch 3, batch 200, train_loss[loss=3.211, ArTop10Accuracy=0.6906, over 13572.00 frames. ], tot_loss[loss=3.222, ArTop10Accuracy=0.6906, over 7770.25 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,258 INFO [trainer.py:765] (2/8) Epoch 3, batch 300, train_loss[loss=3.181, ArTop10Accuracy=0.6992, over 14205.00 frames. ], tot_loss[loss=3.203, ArTop10Accuracy=0.6945, over 9392.67 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,217 INFO [trainer.py:765] (2/8) Epoch 3, batch 400, train_loss[loss=3.158, ArTop10Accuracy=0.7093, over 10824.00 frames. ], tot_loss[loss=3.188, ArTop10Accuracy=0.6974, over 10294.96 frames. ], batch size: 15, lr: 2.63e-02 +2024-08-06 08:48:40,880 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,542 INFO [trainer.py:765] (2/8) Epoch 3, batch 500, train_loss[loss=3.09, ArTop10Accuracy=0.7159, over 12096.00 frames. ], tot_loss[loss=3.174, ArTop10Accuracy=0.7, over 10852.34 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,477 INFO [trainer.py:765] (2/8) Epoch 3, batch 600, train_loss[loss=3.165, ArTop10Accuracy=0.7016, over 11343.00 frames. ], tot_loss[loss=3.157, ArTop10Accuracy=0.7035, over 11365.70 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,618 INFO [trainer.py:765] (2/8) Epoch 3, batch 700, train_loss[loss=3.123, ArTop10Accuracy=0.7134, over 9264.00 frames. ], tot_loss[loss=3.15, ArTop10Accuracy=0.7048, over 11520.92 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 08:53:57,388 INFO [trainer.py:765] (2/8) Epoch 3, batch 800, train_loss[loss=3.136, ArTop10Accuracy=0.7107, over 9384.00 frames. ], tot_loss[loss=3.143, ArTop10Accuracy=0.706, over 11642.41 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 08:55:15,118 INFO [trainer.py:765] (2/8) Epoch 3, batch 900, train_loss[loss=3.106, ArTop10Accuracy=0.7141, over 12834.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.7105, over 11681.59 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,557 INFO [trainer.py:765] (2/8) Epoch 3, batch 1000, train_loss[loss=3.044, ArTop10Accuracy=0.7252, over 12933.00 frames. ], tot_loss[loss=3.113, ArTop10Accuracy=0.7119, over 11876.26 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,505 INFO [trainer.py:765] (2/8) Epoch 3, batch 1100, train_loss[loss=3.112, ArTop10Accuracy=0.7162, over 13767.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.7123, over 11954.94 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,398 INFO [trainer.py:765] (2/8) Epoch 3, batch 1200, train_loss[loss=3.103, ArTop10Accuracy=0.712, over 12045.00 frames. ], tot_loss[loss=3.097, ArTop10Accuracy=0.7148, over 11876.24 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:01,918 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:01:50,740 INFO [trainer.py:765] (2/8) Epoch 4, batch 100, train_loss[loss=3.141, ArTop10Accuracy=0.7039, over 14811.00 frames. ], tot_loss[loss=3.07, ArTop10Accuracy=0.7194, over 4779.88 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,859 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:03:02,383 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,384 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 09:03:03,362 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,271 INFO [trainer.py:765] (2/8) Epoch 4, batch 200, train_loss[loss=3.03, ArTop10Accuracy=0.7293, over 13884.00 frames. ], tot_loss[loss=3.046, ArTop10Accuracy=0.7239, over 7761.27 frames. ], batch size: 35, lr: 2.37e-02 +2024-08-06 09:05:01,732 INFO [trainer.py:765] (2/8) Epoch 4, batch 300, train_loss[loss=3.033, ArTop10Accuracy=0.7252, over 14427.00 frames. ], tot_loss[loss=3.04, ArTop10Accuracy=0.7251, over 9404.39 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 09:06:28,149 INFO [trainer.py:765] (2/8) Epoch 4, batch 400, train_loss[loss=3.001, ArTop10Accuracy=0.7312, over 10125.00 frames. ], tot_loss[loss=3.034, ArTop10Accuracy=0.7263, over 10311.26 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 09:08:01,925 INFO [trainer.py:765] (2/8) Epoch 4, batch 500, train_loss[loss=2.926, ArTop10Accuracy=0.7474, over 12684.00 frames. ], tot_loss[loss=3.029, ArTop10Accuracy=0.7274, over 10875.39 frames. ], batch size: 23, lr: 2.33e-02 +2024-08-06 09:09:28,540 INFO [trainer.py:765] (2/8) Epoch 4, batch 600, train_loss[loss=3.015, ArTop10Accuracy=0.7289, over 11412.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.7277, over 11389.35 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,865 INFO [trainer.py:765] (2/8) Epoch 4, batch 700, train_loss[loss=3.008, ArTop10Accuracy=0.7289, over 10233.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.728, over 11523.83 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,513 INFO [trainer.py:765] (2/8) Epoch 4, batch 800, train_loss[loss=2.89, ArTop10Accuracy=0.7544, over 10230.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7279, over 11647.88 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 09:13:33,212 INFO [trainer.py:765] (2/8) Epoch 4, batch 900, train_loss[loss=3.084, ArTop10Accuracy=0.7148, over 13032.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7303, over 11678.88 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,520 INFO [trainer.py:765] (2/8) Epoch 4, batch 1000, train_loss[loss=2.916, ArTop10Accuracy=0.7481, over 12747.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7304, over 11858.96 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,981 INFO [trainer.py:765] (2/8) Epoch 4, batch 1100, train_loss[loss=2.947, ArTop10Accuracy=0.7418, over 13398.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7302, over 11919.73 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,292 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,345 INFO [trainer.py:765] (2/8) Epoch 4, batch 1200, train_loss[loss=3.05, ArTop10Accuracy=0.722, over 11808.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.7312, over 11852.81 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,314 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:20:17,171 INFO [trainer.py:765] (2/8) Epoch 5, batch 100, train_loss[loss=3.07, ArTop10Accuracy=0.7177, over 14298.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7348, over 4753.11 frames. 
], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,293 INFO [trainer.py:765] (2/8) Epoch 5, batch 200, train_loss[loss=2.957, ArTop10Accuracy=0.7421, over 13812.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.7365, over 7752.56 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,239 INFO [trainer.py:765] (2/8) Epoch 5, batch 300, train_loss[loss=3.04, ArTop10Accuracy=0.7233, over 14325.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7381, over 9367.98 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,537 INFO [trainer.py:765] (2/8) Epoch 5, batch 400, train_loss[loss=2.909, ArTop10Accuracy=0.7522, over 10209.00 frames. ], tot_loss[loss=2.97, ArTop10Accuracy=0.7384, over 10284.90 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,418 INFO [trainer.py:765] (2/8) Epoch 5, batch 500, train_loss[loss=2.91, ArTop10Accuracy=0.7482, over 12159.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7404, over 10847.24 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,537 INFO [trainer.py:765] (2/8) Epoch 5, batch 600, train_loss[loss=2.969, ArTop10Accuracy=0.7339, over 11391.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.74, over 11368.24 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,671 INFO [trainer.py:765] (2/8) Epoch 5, batch 700, train_loss[loss=2.931, ArTop10Accuracy=0.7457, over 10194.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7394, over 11518.22 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 09:30:44,692 INFO [trainer.py:765] (2/8) Epoch 5, batch 800, train_loss[loss=2.974, ArTop10Accuracy=0.7366, over 9222.00 frames. ], tot_loss[loss=2.97, ArTop10Accuracy=0.7384, over 11625.17 frames. ], batch size: 11, lr: 2.03e-02 +2024-08-06 09:31:51,239 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:32:00,761 INFO [trainer.py:811] (2/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,762 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 09:32:01,706 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,550 INFO [trainer.py:765] (2/8) Epoch 5, batch 900, train_loss[loss=2.953, ArTop10Accuracy=0.7378, over 12915.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7405, over 11667.24 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,322 INFO [trainer.py:765] (2/8) Epoch 5, batch 1000, train_loss[loss=2.969, ArTop10Accuracy=0.7443, over 12915.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7401, over 11855.31 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,299 INFO [trainer.py:765] (2/8) Epoch 5, batch 1100, train_loss[loss=2.942, ArTop10Accuracy=0.7451, over 13662.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7403, over 11926.15 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,330 INFO [trainer.py:765] (2/8) Epoch 5, batch 1200, train_loss[loss=3.037, ArTop10Accuracy=0.7247, over 12591.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7405, over 11843.81 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:54,933 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:38:52,662 INFO [trainer.py:765] (2/8) Epoch 6, batch 100, train_loss[loss=3.01, ArTop10Accuracy=0.729, over 14643.00 frames. ], tot_loss[loss=2.948, ArTop10Accuracy=0.7425, over 4757.65 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,834 INFO [trainer.py:765] (2/8) Epoch 6, batch 200, train_loss[loss=2.891, ArTop10Accuracy=0.7547, over 13698.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.7452, over 7750.83 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,964 INFO [trainer.py:765] (2/8) Epoch 6, batch 300, train_loss[loss=2.975, ArTop10Accuracy=0.7346, over 14988.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7453, over 9390.38 frames. ], batch size: 45, lr: 1.83e-02 +2024-08-06 09:43:17,827 INFO [trainer.py:765] (2/8) Epoch 6, batch 400, train_loss[loss=2.765, ArTop10Accuracy=0.7792, over 10242.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.746, over 10311.50 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,128 INFO [trainer.py:765] (2/8) Epoch 6, batch 500, train_loss[loss=2.926, ArTop10Accuracy=0.7468, over 12633.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7472, over 10873.26 frames. ], batch size: 23, lr: 1.82e-02 +2024-08-06 09:46:22,873 INFO [trainer.py:765] (2/8) Epoch 6, batch 600, train_loss[loss=2.928, ArTop10Accuracy=0.7472, over 11355.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7471, over 11385.20 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,219 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,867 INFO [trainer.py:765] (2/8) Epoch 6, batch 700, train_loss[loss=2.878, ArTop10Accuracy=0.7607, over 10119.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7469, over 11525.26 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,954 INFO [trainer.py:765] (2/8) Epoch 6, batch 800, train_loss[loss=2.852, ArTop10Accuracy=0.7609, over 9819.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7466, over 11650.70 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,135 INFO [trainer.py:765] (2/8) Epoch 6, batch 900, train_loss[loss=2.791, ArTop10Accuracy=0.78, over 13071.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7474, over 11693.48 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,297 INFO [trainer.py:765] (2/8) Epoch 6, batch 1000, train_loss[loss=2.926, ArTop10Accuracy=0.7512, over 12903.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.747, over 11887.13 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,921 INFO [trainer.py:765] (2/8) Epoch 6, batch 1100, train_loss[loss=2.984, ArTop10Accuracy=0.7382, over 13563.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7464, over 11928.49 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,336 INFO [trainer.py:765] (2/8) Epoch 6, batch 1200, train_loss[loss=3.008, ArTop10Accuracy=0.731, over 12219.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7471, over 11866.28 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,368 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:57:06,699 INFO [trainer.py:765] (2/8) Epoch 7, batch 100, train_loss[loss=2.987, ArTop10Accuracy=0.7334, over 14478.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7479, over 4762.83 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 09:58:39,425 INFO [trainer.py:765] (2/8) Epoch 7, batch 200, train_loss[loss=2.928, ArTop10Accuracy=0.7454, over 13437.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7509, over 7759.42 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,083 INFO [trainer.py:765] (2/8) Epoch 7, batch 300, train_loss[loss=2.956, ArTop10Accuracy=0.7386, over 14145.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7512, over 9375.25 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,509 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 10:00:50,977 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,116 INFO [trainer.py:765] (2/8) Epoch 7, batch 400, train_loss[loss=2.927, ArTop10Accuracy=0.7455, over 10119.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7518, over 10282.62 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 10:03:21,457 INFO [trainer.py:765] (2/8) Epoch 7, batch 500, train_loss[loss=2.883, ArTop10Accuracy=0.7547, over 12735.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7532, over 10853.66 frames. ], batch size: 23, lr: 1.61e-02 +2024-08-06 10:04:51,883 INFO [trainer.py:765] (2/8) Epoch 7, batch 600, train_loss[loss=2.797, ArTop10Accuracy=0.7727, over 11964.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7535, over 11365.43 frames. ], batch size: 19, lr: 1.61e-02 +2024-08-06 10:06:25,110 INFO [trainer.py:765] (2/8) Epoch 7, batch 700, train_loss[loss=2.876, ArTop10Accuracy=0.7522, over 10353.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7524, over 11504.87 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,949 INFO [trainer.py:765] (2/8) Epoch 7, batch 800, train_loss[loss=2.859, ArTop10Accuracy=0.7643, over 10203.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7524, over 11612.79 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,821 INFO [trainer.py:765] (2/8) Epoch 7, batch 900, train_loss[loss=2.795, ArTop10Accuracy=0.7766, over 12912.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7533, over 11666.77 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,636 INFO [trainer.py:765] (2/8) Epoch 7, batch 1000, train_loss[loss=2.902, ArTop10Accuracy=0.7527, over 12813.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7529, over 11861.16 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,208 INFO [trainer.py:765] (2/8) Epoch 7, batch 1100, train_loss[loss=2.863, ArTop10Accuracy=0.7594, over 13686.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7517, over 11940.68 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,204 INFO [trainer.py:765] (2/8) Epoch 7, batch 1200, train_loss[loss=3.009, ArTop10Accuracy=0.7302, over 11538.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7522, over 11860.24 frames. ], batch size: 103, lr: 1.57e-02 +2024-08-06 10:13:46,381 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:15:03,601 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,821 INFO [trainer.py:765] (2/8) Epoch 8, batch 100, train_loss[loss=2.902, ArTop10Accuracy=0.7498, over 14082.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.7545, over 4743.10 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,862 INFO [trainer.py:765] (2/8) Epoch 8, batch 200, train_loss[loss=2.889, ArTop10Accuracy=0.7539, over 13659.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7557, over 7747.35 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,898 INFO [trainer.py:765] (2/8) Epoch 8, batch 300, train_loss[loss=2.899, ArTop10Accuracy=0.7508, over 14328.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7569, over 9362.59 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,342 INFO [trainer.py:765] (2/8) Epoch 8, batch 400, train_loss[loss=2.801, ArTop10Accuracy=0.7699, over 10290.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7589, over 10271.46 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 10:21:32,411 INFO [trainer.py:765] (2/8) Epoch 8, batch 500, train_loss[loss=2.819, ArTop10Accuracy=0.7623, over 12291.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7595, over 10811.25 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 10:23:00,974 INFO [trainer.py:765] (2/8) Epoch 8, batch 600, train_loss[loss=2.875, ArTop10Accuracy=0.7605, over 11448.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7597, over 11342.02 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,788 INFO [trainer.py:765] (2/8) Epoch 8, batch 700, train_loss[loss=2.805, ArTop10Accuracy=0.7703, over 9375.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7582, over 11499.48 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:25:56,085 INFO [trainer.py:765] (2/8) Epoch 8, batch 800, train_loss[loss=2.802, ArTop10Accuracy=0.7732, over 9558.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7568, over 11628.09 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:27:12,244 INFO [trainer.py:765] (2/8) Epoch 8, batch 900, train_loss[loss=2.901, ArTop10Accuracy=0.7464, over 13362.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7566, over 11687.71 frames. ], batch size: 28, lr: 1.42e-02 +2024-08-06 10:28:25,263 INFO [trainer.py:765] (2/8) Epoch 8, batch 1000, train_loss[loss=2.916, ArTop10Accuracy=0.7449, over 12993.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7559, over 11886.99 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,156 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:29:16,830 INFO [trainer.py:811] (2/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 10:29:17,491 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,731 INFO [trainer.py:765] (2/8) Epoch 8, batch 1100, train_loss[loss=2.966, ArTop10Accuracy=0.7374, over 13548.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7551, over 11952.90 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,946 INFO [trainer.py:765] (2/8) Epoch 8, batch 1200, train_loss[loss=2.996, ArTop10Accuracy=0.7329, over 12714.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7555, over 11867.75 frames. ], batch size: 103, lr: 1.40e-02 +2024-08-06 10:32:05,631 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:34:01,257 INFO [trainer.py:765] (2/8) Epoch 9, batch 100, train_loss[loss=2.938, ArTop10Accuracy=0.7441, over 14586.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7581, over 4765.68 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,773 INFO [trainer.py:765] (2/8) Epoch 9, batch 200, train_loss[loss=2.9, ArTop10Accuracy=0.7475, over 13782.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7596, over 7752.54 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,928 INFO [trainer.py:765] (2/8) Epoch 9, batch 300, train_loss[loss=2.8, ArTop10Accuracy=0.7712, over 14121.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7611, over 9397.10 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,697 INFO [trainer.py:765] (2/8) Epoch 9, batch 400, train_loss[loss=2.738, ArTop10Accuracy=0.7828, over 10422.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7617, over 10292.77 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,258 INFO [trainer.py:765] (2/8) Epoch 9, batch 500, train_loss[loss=2.854, ArTop10Accuracy=0.7616, over 12159.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7626, over 10855.51 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,688 INFO [trainer.py:765] (2/8) Epoch 9, batch 600, train_loss[loss=2.898, ArTop10Accuracy=0.7517, over 11454.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7617, over 11360.35 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,441 INFO [trainer.py:765] (2/8) Epoch 9, batch 700, train_loss[loss=2.905, ArTop10Accuracy=0.7533, over 10239.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7615, over 11519.40 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:44:02,952 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,670 INFO [trainer.py:765] (2/8) Epoch 9, batch 800, train_loss[loss=2.826, ArTop10Accuracy=0.765, over 10275.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7612, over 11632.47 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:45:35,719 INFO [trainer.py:765] (2/8) Epoch 9, batch 900, train_loss[loss=2.826, ArTop10Accuracy=0.7678, over 12942.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7624, over 11680.07 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:46:51,272 INFO [trainer.py:765] (2/8) Epoch 9, batch 1000, train_loss[loss=2.815, ArTop10Accuracy=0.7701, over 12792.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7614, over 11876.81 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,247 INFO [trainer.py:765] (2/8) Epoch 9, batch 1100, train_loss[loss=2.844, ArTop10Accuracy=0.765, over 13515.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7605, over 11950.97 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,053 INFO [trainer.py:765] (2/8) Epoch 9, batch 1200, train_loss[loss=2.933, ArTop10Accuracy=0.7475, over 12606.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7604, over 11852.02 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:22,705 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:52:12,326 INFO [trainer.py:765] (2/8) Epoch 10, batch 100, train_loss[loss=2.848, ArTop10Accuracy=0.7638, over 14544.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7632, over 4772.41 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,586 INFO [trainer.py:765] (2/8) Epoch 10, batch 200, train_loss[loss=2.894, ArTop10Accuracy=0.7551, over 13695.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7647, over 7750.75 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,090 INFO [trainer.py:765] (2/8) Epoch 10, batch 300, train_loss[loss=2.908, ArTop10Accuracy=0.7534, over 14178.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7653, over 9377.29 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,175 INFO [trainer.py:765] (2/8) Epoch 10, batch 400, train_loss[loss=2.698, ArTop10Accuracy=0.7918, over 10212.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7655, over 10290.51 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 10:58:04,938 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:58:14,557 INFO [trainer.py:811] (2/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,558 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 10:58:15,570 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,575 INFO [trainer.py:765] (2/8) Epoch 10, batch 500, train_loss[loss=2.761, ArTop10Accuracy=0.7821, over 12861.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7663, over 10857.40 frames. ], batch size: 23, lr: 1.19e-02 +2024-08-06 10:59:42,814 INFO [trainer.py:765] (2/8) Epoch 10, batch 600, train_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11610.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7667, over 11367.33 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,107 INFO [trainer.py:765] (2/8) Epoch 10, batch 700, train_loss[loss=2.806, ArTop10Accuracy=0.7697, over 9408.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7654, over 11509.69 frames. ], batch size: 11, lr: 1.18e-02 +2024-08-06 11:02:36,917 INFO [trainer.py:765] (2/8) Epoch 10, batch 800, train_loss[loss=2.718, ArTop10Accuracy=0.7886, over 9372.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7645, over 11622.04 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 11:03:51,212 INFO [trainer.py:765] (2/8) Epoch 10, batch 900, train_loss[loss=2.789, ArTop10Accuracy=0.7745, over 12852.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7658, over 11675.50 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,352 INFO [trainer.py:765] (2/8) Epoch 10, batch 1000, train_loss[loss=2.797, ArTop10Accuracy=0.7696, over 13302.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7649, over 11874.18 frames. ], batch size: 28, lr: 1.17e-02 +2024-08-06 11:06:21,720 INFO [trainer.py:765] (2/8) Epoch 10, batch 1100, train_loss[loss=2.756, ArTop10Accuracy=0.7815, over 13491.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7642, over 11941.88 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,772 INFO [trainer.py:765] (2/8) Epoch 10, batch 1200, train_loss[loss=2.953, ArTop10Accuracy=0.7409, over 12528.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7635, over 11862.54 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,717 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:10:29,954 INFO [trainer.py:765] (2/8) Epoch 11, batch 100, train_loss[loss=2.905, ArTop10Accuracy=0.7495, over 14376.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.766, over 4753.26 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,673 INFO [trainer.py:765] (2/8) Epoch 11, batch 200, train_loss[loss=2.831, ArTop10Accuracy=0.7636, over 13989.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7677, over 7754.04 frames. 
], batch size: 35, lr: 1.10e-02 +2024-08-06 11:12:22,825 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,545 INFO [trainer.py:765] (2/8) Epoch 11, batch 300, train_loss[loss=2.864, ArTop10Accuracy=0.7564, over 14175.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7691, over 9363.48 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,269 INFO [trainer.py:765] (2/8) Epoch 11, batch 400, train_loss[loss=2.861, ArTop10Accuracy=0.7604, over 10101.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7696, over 10275.08 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,637 INFO [trainer.py:765] (2/8) Epoch 11, batch 500, train_loss[loss=2.773, ArTop10Accuracy=0.7726, over 12708.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7693, over 10826.48 frames. ], batch size: 23, lr: 1.09e-02 +2024-08-06 11:18:00,517 INFO [trainer.py:765] (2/8) Epoch 11, batch 600, train_loss[loss=2.764, ArTop10Accuracy=0.7791, over 11493.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7693, over 11350.63 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,511 INFO [trainer.py:765] (2/8) Epoch 11, batch 700, train_loss[loss=2.662, ArTop10Accuracy=0.7939, over 9405.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7687, over 11500.78 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 11:20:55,480 INFO [trainer.py:765] (2/8) Epoch 11, batch 800, train_loss[loss=2.791, ArTop10Accuracy=0.7738, over 10173.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7684, over 11644.67 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,705 INFO [trainer.py:765] (2/8) Epoch 11, batch 900, train_loss[loss=2.791, ArTop10Accuracy=0.7762, over 13308.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7689, over 11696.54 frames. ], batch size: 28, lr: 1.07e-02 +2024-08-06 11:23:31,799 INFO [trainer.py:765] (2/8) Epoch 11, batch 1000, train_loss[loss=2.792, ArTop10Accuracy=0.766, over 12885.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7682, over 11886.02 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,902 INFO [trainer.py:765] (2/8) Epoch 11, batch 1100, train_loss[loss=2.821, ArTop10Accuracy=0.7629, over 13929.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7664, over 11966.48 frames. ], batch size: 35, lr: 1.06e-02 +2024-08-06 11:26:00,733 INFO [trainer.py:765] (2/8) Epoch 11, batch 1200, train_loss[loss=2.978, ArTop10Accuracy=0.7354, over 11682.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7667, over 11862.92 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,847 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 11:26:26,185 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,617 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:29:03,450 INFO [trainer.py:765] (2/8) Epoch 12, batch 100, train_loss[loss=2.878, ArTop10Accuracy=0.755, over 14742.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7682, over 4789.07 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,672 INFO [trainer.py:765] (2/8) Epoch 12, batch 200, train_loss[loss=2.788, ArTop10Accuracy=0.7703, over 13956.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7696, over 7759.42 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,655 INFO [trainer.py:765] (2/8) Epoch 12, batch 300, train_loss[loss=2.867, ArTop10Accuracy=0.7579, over 14355.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7703, over 9398.69 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 11:33:30,738 INFO [trainer.py:765] (2/8) Epoch 12, batch 400, train_loss[loss=2.621, ArTop10Accuracy=0.8039, over 10920.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7713, over 10284.91 frames. ], batch size: 15, lr: 1.00e-02 +2024-08-06 11:34:55,731 INFO [trainer.py:765] (2/8) Epoch 12, batch 500, train_loss[loss=2.757, ArTop10Accuracy=0.7775, over 12078.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7725, over 10826.63 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,362 INFO [trainer.py:765] (2/8) Epoch 12, batch 600, train_loss[loss=2.762, ArTop10Accuracy=0.7754, over 11370.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7722, over 11358.90 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,343 INFO [trainer.py:765] (2/8) Epoch 12, batch 700, train_loss[loss=2.66, ArTop10Accuracy=0.7981, over 10167.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7716, over 11498.68 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,610 INFO [trainer.py:765] (2/8) Epoch 12, batch 800, train_loss[loss=2.768, ArTop10Accuracy=0.7754, over 10056.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7714, over 11621.85 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,889 INFO [trainer.py:765] (2/8) Epoch 12, batch 900, train_loss[loss=2.752, ArTop10Accuracy=0.7747, over 13314.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7718, over 11684.35 frames. ], batch size: 28, lr: 9.87e-03 +2024-08-06 11:41:13,993 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,189 INFO [trainer.py:765] (2/8) Epoch 12, batch 1000, train_loss[loss=2.823, ArTop10Accuracy=0.7637, over 13404.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7708, over 11891.06 frames. ], batch size: 28, lr: 9.85e-03 +2024-08-06 11:43:14,320 INFO [trainer.py:765] (2/8) Epoch 12, batch 1100, train_loss[loss=2.808, ArTop10Accuracy=0.7702, over 13656.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 11941.91 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,155 INFO [trainer.py:765] (2/8) Epoch 12, batch 1200, train_loss[loss=2.945, ArTop10Accuracy=0.7388, over 12102.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7694, over 11857.94 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,863 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:47:26,600 INFO [trainer.py:765] (2/8) Epoch 13, batch 100, train_loss[loss=2.827, ArTop10Accuracy=0.7635, over 14637.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7713, over 4744.99 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,778 INFO [trainer.py:765] (2/8) Epoch 13, batch 200, train_loss[loss=2.797, ArTop10Accuracy=0.7655, over 13446.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7728, over 7731.58 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,515 INFO [trainer.py:765] (2/8) Epoch 13, batch 300, train_loss[loss=2.8, ArTop10Accuracy=0.7724, over 14124.00 frames. ], tot_loss[loss=2.782, ArTop10Accuracy=0.7733, over 9354.51 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,764 INFO [trainer.py:765] (2/8) Epoch 13, batch 400, train_loss[loss=2.705, ArTop10Accuracy=0.7874, over 10371.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7745, over 10278.47 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,405 INFO [trainer.py:765] (2/8) Epoch 13, batch 500, train_loss[loss=2.679, ArTop10Accuracy=0.7927, over 12141.00 frames. ], tot_loss[loss=2.772, ArTop10Accuracy=0.7756, over 10843.83 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,223 INFO [trainer.py:765] (2/8) Epoch 13, batch 600, train_loss[loss=2.73, ArTop10Accuracy=0.7851, over 11412.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7752, over 11366.55 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,081 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:55:56,835 INFO [trainer.py:811] (2/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 11:55:57,711 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,465 INFO [trainer.py:765] (2/8) Epoch 13, batch 700, train_loss[loss=2.789, ArTop10Accuracy=0.7731, over 10020.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7741, over 11510.09 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 11:57:46,683 INFO [trainer.py:765] (2/8) Epoch 13, batch 800, train_loss[loss=2.646, ArTop10Accuracy=0.8021, over 10278.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.7736, over 11638.26 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,284 INFO [trainer.py:765] (2/8) Epoch 13, batch 900, train_loss[loss=2.749, ArTop10Accuracy=0.7827, over 12804.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7745, over 11690.54 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,175 INFO [trainer.py:765] (2/8) Epoch 13, batch 1000, train_loss[loss=2.778, ArTop10Accuracy=0.7769, over 12894.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7733, over 11902.64 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,879 INFO [trainer.py:765] (2/8) Epoch 13, batch 1100, train_loss[loss=2.878, ArTop10Accuracy=0.7554, over 13428.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7717, over 11968.13 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,662 INFO [trainer.py:765] (2/8) Epoch 13, batch 1200, train_loss[loss=2.927, ArTop10Accuracy=0.7453, over 12432.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7716, over 11888.02 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,159 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:05:45,332 INFO [trainer.py:765] (2/8) Epoch 14, batch 100, train_loss[loss=2.837, ArTop10Accuracy=0.7623, over 14376.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7738, over 4767.97 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,602 INFO [trainer.py:765] (2/8) Epoch 14, batch 200, train_loss[loss=2.785, ArTop10Accuracy=0.7733, over 13857.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7755, over 7753.86 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,311 INFO [trainer.py:765] (2/8) Epoch 14, batch 300, train_loss[loss=2.779, ArTop10Accuracy=0.772, over 14271.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7764, over 9384.04 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,129 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,227 INFO [trainer.py:765] (2/8) Epoch 14, batch 400, train_loss[loss=2.689, ArTop10Accuracy=0.7941, over 11049.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7769, over 10302.72 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 12:11:36,151 INFO [trainer.py:765] (2/8) Epoch 14, batch 500, train_loss[loss=2.766, ArTop10Accuracy=0.7788, over 12171.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.778, over 10839.36 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,995 INFO [trainer.py:765] (2/8) Epoch 14, batch 600, train_loss[loss=2.759, ArTop10Accuracy=0.7802, over 11607.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7772, over 11352.46 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,554 INFO [trainer.py:765] (2/8) Epoch 14, batch 700, train_loss[loss=2.653, ArTop10Accuracy=0.7979, over 10074.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7766, over 11499.27 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 12:15:58,072 INFO [trainer.py:765] (2/8) Epoch 14, batch 800, train_loss[loss=2.697, ArTop10Accuracy=0.7895, over 9354.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7758, over 11630.13 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 12:17:12,866 INFO [trainer.py:765] (2/8) Epoch 14, batch 900, train_loss[loss=2.692, ArTop10Accuracy=0.7951, over 12969.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7764, over 11672.33 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,612 INFO [trainer.py:765] (2/8) Epoch 14, batch 1000, train_loss[loss=2.764, ArTop10Accuracy=0.7829, over 12813.00 frames. ], tot_loss[loss=2.772, ArTop10Accuracy=0.7754, over 11880.22 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,377 INFO [trainer.py:765] (2/8) Epoch 14, batch 1100, train_loss[loss=2.763, ArTop10Accuracy=0.7783, over 13455.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7746, over 11943.84 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,279 INFO [trainer.py:765] (2/8) Epoch 14, batch 1200, train_loss[loss=2.902, ArTop10Accuracy=0.7476, over 11772.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.774, over 11847.85 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:58,162 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:23:51,961 INFO [trainer.py:765] (2/8) Epoch 15, batch 100, train_loss[loss=2.847, ArTop10Accuracy=0.7598, over 14640.00 frames. ], tot_loss[loss=2.77, ArTop10Accuracy=0.7752, over 4775.25 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,598 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (2/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
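The recurring [optim.py:386] lines above give a five-number summary of recent gradient norms. In the values printed here, the reported threshold matches Clipping_scale times the middle (median) entry, e.g. 2.0 * 1.374e+02 = 2.748e+02 in the Epoch 14 summary above. The sketch below is a toy reconstruction of such a summary line, not the ScaledAdam code in optim.py: it assumes the five numbers are the 0/25/50/75/100 percentiles of a window of recent gradient norms and that percent-clipped is the share of those norms above the threshold (both assumptions; only the threshold = scale * median relationship is directly visible in these logs).

import numpy as np

def grad_norm_summary(recent_norms, clipping_scale=2.0):
    # Five-number summary (min, 25%, median, 75%, max) of the recent gradient norms.
    norms = np.asarray(recent_norms, dtype=float)
    q = np.percentile(norms, [0, 25, 50, 75, 100])
    # The printed threshold appears to be clipping_scale times the median quartile.
    threshold = clipping_scale * q[2]
    # Assumption: percent-clipped is the share of recent norms that exceeded the threshold.
    percent_clipped = 100.0 * float(np.mean(norms > threshold))
    quartiles = " ".join(f"{v:.3e}" for v in q)
    return (f"Clipping_scale={clipping_scale}, grad-norm quartiles {quartiles}, "
            f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}")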
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 12:24:11,094 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,986 INFO [trainer.py:765] (2/8) Epoch 15, batch 200, train_loss[loss=2.764, ArTop10Accuracy=0.7793, over 13491.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7786, over 7767.90 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,695 INFO [trainer.py:765] (2/8) Epoch 15, batch 300, train_loss[loss=2.755, ArTop10Accuracy=0.7769, over 14298.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7791, over 9413.88 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,533 INFO [trainer.py:765] (2/8) Epoch 15, batch 400, train_loss[loss=2.735, ArTop10Accuracy=0.7841, over 10920.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7793, over 10302.96 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 12:29:54,031 INFO [trainer.py:765] (2/8) Epoch 15, batch 500, train_loss[loss=2.659, ArTop10Accuracy=0.7957, over 12117.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7804, over 10858.01 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,293 INFO [trainer.py:765] (2/8) Epoch 15, batch 600, train_loss[loss=2.7, ArTop10Accuracy=0.7888, over 11391.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7795, over 11375.98 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,174 INFO [trainer.py:765] (2/8) Epoch 15, batch 700, train_loss[loss=2.552, ArTop10Accuracy=0.8162, over 9945.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7785, over 11508.10 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 12:34:18,254 INFO [trainer.py:765] (2/8) Epoch 15, batch 800, train_loss[loss=2.783, ArTop10Accuracy=0.773, over 10314.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7781, over 11629.59 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 12:35:34,728 INFO [trainer.py:765] (2/8) Epoch 15, batch 900, train_loss[loss=2.818, ArTop10Accuracy=0.7685, over 12936.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7794, over 11669.12 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,541 INFO [trainer.py:765] (2/8) Epoch 15, batch 1000, train_loss[loss=2.774, ArTop10Accuracy=0.7705, over 12861.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7781, over 11870.57 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,177 INFO [trainer.py:765] (2/8) Epoch 15, batch 1100, train_loss[loss=2.838, ArTop10Accuracy=0.763, over 13584.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7767, over 11923.60 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,841 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,789 INFO [trainer.py:765] (2/8) Epoch 15, batch 1200, train_loss[loss=2.862, ArTop10Accuracy=0.7544, over 12441.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.777, over 11837.70 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,530 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:42:17,617 INFO [trainer.py:765] (2/8) Epoch 16, batch 100, train_loss[loss=2.803, ArTop10Accuracy=0.7697, over 14316.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7784, over 4759.65 frames. 
], batch size: 63, lr: 7.63e-03 +2024-08-06 12:43:49,563 INFO [trainer.py:765] (2/8) Epoch 16, batch 200, train_loss[loss=2.635, ArTop10Accuracy=0.799, over 13557.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7797, over 7747.42 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,502 INFO [trainer.py:765] (2/8) Epoch 16, batch 300, train_loss[loss=2.834, ArTop10Accuracy=0.7625, over 13779.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.781, over 9373.27 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,207 INFO [trainer.py:765] (2/8) Epoch 16, batch 400, train_loss[loss=2.708, ArTop10Accuracy=0.7908, over 10155.00 frames. ], tot_loss[loss=2.735, ArTop10Accuracy=0.7825, over 10260.65 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,310 INFO [trainer.py:765] (2/8) Epoch 16, batch 500, train_loss[loss=2.684, ArTop10Accuracy=0.7932, over 12183.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7832, over 10821.97 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,642 INFO [trainer.py:765] (2/8) Epoch 16, batch 600, train_loss[loss=2.672, ArTop10Accuracy=0.7957, over 11847.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7826, over 11354.91 frames. ], batch size: 19, lr: 7.54e-03 +2024-08-06 12:51:23,681 INFO [trainer.py:765] (2/8) Epoch 16, batch 700, train_loss[loss=2.805, ArTop10Accuracy=0.7707, over 9318.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7808, over 11503.01 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 12:52:43,501 INFO [trainer.py:765] (2/8) Epoch 16, batch 800, train_loss[loss=2.669, ArTop10Accuracy=0.793, over 10242.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7792, over 11612.27 frames. ], batch size: 12, lr: 7.51e-03 +2024-08-06 12:53:06,014 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:53:15,496 INFO [trainer.py:811] (2/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,497 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 12:53:16,186 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,481 INFO [trainer.py:765] (2/8) Epoch 16, batch 900, train_loss[loss=2.798, ArTop10Accuracy=0.7704, over 12786.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.78, over 11657.17 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,792 INFO [trainer.py:765] (2/8) Epoch 16, batch 1000, train_loss[loss=2.778, ArTop10Accuracy=0.7748, over 12894.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7795, over 11860.56 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,162 INFO [trainer.py:765] (2/8) Epoch 16, batch 1100, train_loss[loss=2.797, ArTop10Accuracy=0.7688, over 13869.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7783, over 11950.02 frames. ], batch size: 35, lr: 7.45e-03 +2024-08-06 12:57:48,485 INFO [trainer.py:765] (2/8) Epoch 16, batch 1200, train_loss[loss=2.856, ArTop10Accuracy=0.7589, over 12633.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7783, over 11848.50 frames. ], batch size: 103, lr: 7.44e-03 +2024-08-06 12:58:48,362 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:00:47,898 INFO [trainer.py:765] (2/8) Epoch 17, batch 100, train_loss[loss=2.763, ArTop10Accuracy=0.7751, over 14601.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7817, over 4750.76 frames. 
], batch size: 63, lr: 7.18e-03 +2024-08-06 13:02:19,302 INFO [trainer.py:765] (2/8) Epoch 17, batch 200, train_loss[loss=2.645, ArTop10Accuracy=0.7961, over 13347.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7829, over 7753.52 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,517 INFO [trainer.py:765] (2/8) Epoch 17, batch 300, train_loss[loss=2.766, ArTop10Accuracy=0.7765, over 14475.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.7835, over 9368.92 frames. ], batch size: 45, lr: 7.15e-03 +2024-08-06 13:05:21,761 INFO [trainer.py:765] (2/8) Epoch 17, batch 400, train_loss[loss=2.698, ArTop10Accuracy=0.7876, over 10851.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7838, over 10285.68 frames. ], batch size: 15, lr: 7.14e-03 +2024-08-06 13:06:47,021 INFO [trainer.py:765] (2/8) Epoch 17, batch 500, train_loss[loss=2.613, ArTop10Accuracy=0.8077, over 12156.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7855, over 10849.34 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,876 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,688 INFO [trainer.py:765] (2/8) Epoch 17, batch 600, train_loss[loss=2.731, ArTop10Accuracy=0.783, over 11442.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7841, over 11370.60 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,836 INFO [trainer.py:765] (2/8) Epoch 17, batch 700, train_loss[loss=2.633, ArTop10Accuracy=0.807, over 10050.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7831, over 11529.16 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,480 INFO [trainer.py:765] (2/8) Epoch 17, batch 800, train_loss[loss=2.641, ArTop10Accuracy=0.8067, over 9264.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7829, over 11639.94 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 13:12:35,670 INFO [trainer.py:765] (2/8) Epoch 17, batch 900, train_loss[loss=2.787, ArTop10Accuracy=0.7766, over 13002.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.7836, over 11693.45 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,061 INFO [trainer.py:765] (2/8) Epoch 17, batch 1000, train_loss[loss=2.69, ArTop10Accuracy=0.7868, over 12726.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7817, over 11891.49 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,484 INFO [trainer.py:765] (2/8) Epoch 17, batch 1100, train_loss[loss=2.742, ArTop10Accuracy=0.7813, over 13857.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7807, over 11966.62 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,388 INFO [trainer.py:765] (2/8) Epoch 17, batch 1200, train_loss[loss=2.858, ArTop10Accuracy=0.7581, over 12147.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7801, over 11878.52 frames. ], batch size: 103, lr: 7.01e-03 +2024-08-06 13:17:21,657 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:19:15,994 INFO [trainer.py:765] (2/8) Epoch 18, batch 100, train_loss[loss=2.798, ArTop10Accuracy=0.7686, over 14166.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7833, over 4761.06 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,598 INFO [trainer.py:765] (2/8) Epoch 18, batch 200, train_loss[loss=2.716, ArTop10Accuracy=0.7841, over 13710.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7835, over 7760.09 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,105 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 13:22:05,473 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,581 INFO [trainer.py:765] (2/8) Epoch 18, batch 300, train_loss[loss=2.701, ArTop10Accuracy=0.7888, over 14001.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7847, over 9379.73 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,930 INFO [trainer.py:765] (2/8) Epoch 18, batch 400, train_loss[loss=2.628, ArTop10Accuracy=0.8027, over 10353.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.785, over 10253.95 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,014 INFO [trainer.py:765] (2/8) Epoch 18, batch 500, train_loss[loss=2.724, ArTop10Accuracy=0.7832, over 12105.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7862, over 10832.76 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,634 INFO [trainer.py:765] (2/8) Epoch 18, batch 600, train_loss[loss=2.664, ArTop10Accuracy=0.7917, over 11499.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7864, over 11359.21 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,581 INFO [trainer.py:765] (2/8) Epoch 18, batch 700, train_loss[loss=2.679, ArTop10Accuracy=0.7905, over 10233.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7849, over 11509.88 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,984 INFO [trainer.py:765] (2/8) Epoch 18, batch 800, train_loss[loss=2.624, ArTop10Accuracy=0.8056, over 10095.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7841, over 11633.45 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,519 INFO [trainer.py:765] (2/8) Epoch 18, batch 900, train_loss[loss=2.763, ArTop10Accuracy=0.7727, over 12981.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7846, over 11688.74 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,552 INFO [trainer.py:765] (2/8) Epoch 18, batch 1000, train_loss[loss=2.712, ArTop10Accuracy=0.7877, over 12930.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7838, over 11878.51 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,497 INFO [trainer.py:765] (2/8) Epoch 18, batch 1100, train_loss[loss=2.725, ArTop10Accuracy=0.7836, over 13770.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7829, over 11933.41 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,675 INFO [trainer.py:765] (2/8) Epoch 18, batch 1200, train_loss[loss=2.849, ArTop10Accuracy=0.7605, over 12456.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7823, over 11823.99 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,064 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,276 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:37:48,623 INFO [trainer.py:765] (2/8) Epoch 19, batch 100, train_loss[loss=2.829, ArTop10Accuracy=0.7638, over 14544.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7847, over 4759.09 frames. 
], batch size: 63, lr: 6.43e-03 +2024-08-06 13:39:23,258 INFO [trainer.py:765] (2/8) Epoch 19, batch 200, train_loss[loss=2.664, ArTop10Accuracy=0.7977, over 13635.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7865, over 7744.99 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,361 INFO [trainer.py:765] (2/8) Epoch 19, batch 300, train_loss[loss=2.759, ArTop10Accuracy=0.7783, over 14727.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7873, over 9362.71 frames. ], batch size: 45, lr: 6.40e-03 +2024-08-06 13:42:21,067 INFO [trainer.py:765] (2/8) Epoch 19, batch 400, train_loss[loss=2.777, ArTop10Accuracy=0.7715, over 10257.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7875, over 10289.48 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,954 INFO [trainer.py:765] (2/8) Epoch 19, batch 500, train_loss[loss=2.716, ArTop10Accuracy=0.7888, over 12099.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7879, over 10829.04 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,681 INFO [trainer.py:765] (2/8) Epoch 19, batch 600, train_loss[loss=2.644, ArTop10Accuracy=0.7979, over 11373.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7875, over 11342.18 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,321 INFO [trainer.py:765] (2/8) Epoch 19, batch 700, train_loss[loss=2.622, ArTop10Accuracy=0.8048, over 9222.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7868, over 11489.32 frames. ], batch size: 11, lr: 6.35e-03 +2024-08-06 13:48:11,883 INFO [trainer.py:765] (2/8) Epoch 19, batch 800, train_loss[loss=2.682, ArTop10Accuracy=0.7907, over 9537.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7861, over 11598.77 frames. ], batch size: 11, lr: 6.34e-03 +2024-08-06 13:49:27,258 INFO [trainer.py:765] (2/8) Epoch 19, batch 900, train_loss[loss=2.691, ArTop10Accuracy=0.7925, over 13038.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7874, over 11650.51 frames. ], batch size: 28, lr: 6.32e-03 +2024-08-06 13:50:40,652 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:50:50,536 INFO [trainer.py:811] (2/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,536 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33008MB +2024-08-06 13:50:51,488 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,917 INFO [trainer.py:765] (2/8) Epoch 19, batch 1000, train_loss[loss=2.821, ArTop10Accuracy=0.7719, over 13188.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7857, over 11863.63 frames. ], batch size: 28, lr: 6.31e-03 +2024-08-06 13:52:08,266 INFO [trainer.py:765] (2/8) Epoch 19, batch 1100, train_loss[loss=2.794, ArTop10Accuracy=0.7726, over 13524.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7837, over 11957.72 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,313 INFO [trainer.py:765] (2/8) Epoch 19, batch 1200, train_loss[loss=2.808, ArTop10Accuracy=0.7734, over 12612.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7834, over 11877.42 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:22,076 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:56:12,905 INFO [trainer.py:765] (2/8) Epoch 20, batch 100, train_loss[loss=2.786, ArTop10Accuracy=0.7693, over 14730.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7874, over 4769.30 frames. 
], batch size: 63, lr: 6.10e-03 +2024-08-06 13:57:42,493 INFO [trainer.py:765] (2/8) Epoch 20, batch 200, train_loss[loss=2.748, ArTop10Accuracy=0.7818, over 13584.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.789, over 7782.09 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,430 INFO [trainer.py:765] (2/8) Epoch 20, batch 300, train_loss[loss=2.791, ArTop10Accuracy=0.7734, over 14217.00 frames. ], tot_loss[loss=2.699, ArTop10Accuracy=0.7891, over 9382.25 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,356 INFO [trainer.py:765] (2/8) Epoch 20, batch 400, train_loss[loss=2.715, ArTop10Accuracy=0.7895, over 10272.00 frames. ], tot_loss[loss=2.698, ArTop10Accuracy=0.7893, over 10264.93 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,854 INFO [trainer.py:765] (2/8) Epoch 20, batch 500, train_loss[loss=2.714, ArTop10Accuracy=0.7842, over 12114.00 frames. ], tot_loss[loss=2.694, ArTop10Accuracy=0.79, over 10835.01 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,853 INFO [trainer.py:765] (2/8) Epoch 20, batch 600, train_loss[loss=2.604, ArTop10Accuracy=0.8084, over 11556.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.789, over 11368.42 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,864 INFO [trainer.py:765] (2/8) Epoch 20, batch 700, train_loss[loss=2.532, ArTop10Accuracy=0.8161, over 9213.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7882, over 11519.69 frames. ], batch size: 11, lr: 6.03e-03 +2024-08-06 14:05:30,789 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,508 INFO [trainer.py:765] (2/8) Epoch 20, batch 800, train_loss[loss=2.548, ArTop10Accuracy=0.8172, over 10098.00 frames. ], tot_loss[loss=2.707, ArTop10Accuracy=0.7874, over 11627.31 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 14:07:50,944 INFO [trainer.py:765] (2/8) Epoch 20, batch 900, train_loss[loss=2.708, ArTop10Accuracy=0.7879, over 12912.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7883, over 11679.18 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,173 INFO [trainer.py:765] (2/8) Epoch 20, batch 1000, train_loss[loss=2.672, ArTop10Accuracy=0.7979, over 13386.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7883, over 11867.63 frames. ], batch size: 28, lr: 6.00e-03 +2024-08-06 14:10:21,210 INFO [trainer.py:765] (2/8) Epoch 20, batch 1100, train_loss[loss=2.742, ArTop10Accuracy=0.7785, over 13809.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7864, over 11939.19 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,813 INFO [trainer.py:765] (2/8) Epoch 20, batch 1200, train_loss[loss=2.899, ArTop10Accuracy=0.7505, over 12645.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7857, over 11855.77 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:37,479 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 14:12:37,481 INFO [trainer.py:1069] (2/8) Done! 
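Each per-batch record in these logs ("Epoch N, batch M, train_loss[...], tot_loss[...], batch size, lr") follows a fixed layout, so loss and learning-rate curves can be recovered directly from the files added in this diff. Below is a minimal parsing sketch, assuming the original one-record-per-line log files (for example libritts-r/log/log-train-2024-08-06-08-06-14-3, added next) rather than this wrapped diff text; parse_log and the end-of-epoch summary are illustrative helpers, not part of the training code, and validation records are skipped.

import re

# Grounded in the "Epoch N, batch M, ..." training lines shown in these logs.
RECORD_RE = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), ArTop10Accuracy=(?P<train_acc>[\d.]+).*?\], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), ArTop10Accuracy=(?P<tot_acc>[\d.]+).*?\], "
    r"batch size: (?P<bsz>\d+), lr: (?P<lr>[\d.eE+-]+)"
)

def parse_log(path):
    # Yield one dict per training record; validation and optim.py lines do not match.
    with open(path) as f:
        for line in f:
            m = RECORD_RE.search(line)
            if m is None:
                continue
            d = m.groupdict()
            yield {
                "epoch": int(d["epoch"]), "batch": int(d["batch"]),
                "train_loss": float(d["train_loss"]), "train_acc": float(d["train_acc"]),
                "tot_loss": float(d["tot_loss"]), "tot_acc": float(d["tot_acc"]),
                "batch_size": int(d["bsz"]), "lr": float(d["lr"]),
            }

if __name__ == "__main__":
    # Print the last smoothed (tot_loss) value reported in each epoch for one rank's log.
    last = {}
    for r in parse_log("libritts-r/log/log-train-2024-08-06-08-06-14-3"):
        last[r["epoch"]] = r
    for epoch in sorted(last):
        r = last[epoch]
        print(f"epoch {epoch}: tot_loss={r['tot_loss']:.3f} "
              f"ArTop10Accuracy={r['tot_acc']:.4f} lr={r['lr']:.2e}")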
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-3 b/libritts-r/log/log-train-2024-08-06-08-06-14-3 new file mode 100644 index 0000000000000000000000000000000000000000..b60149e3732ce1565e89880b3cb3c356853755e9 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-3 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,317 INFO [trainer.py:870] (3/8) Training started +2024-08-06 08:06:14,318 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 08:06:14,318 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,318 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 08:06:15,086 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,728 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 08:06:19,151 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 08:06:19,153 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 08:06:19,154 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 08:06:19,154 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 08:06:19,155 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,758 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 08:06:19,758 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 08:06:20,081 INFO [datamodule.py:388] (3/8) About to create dev dataloader +2024-08-06 
08:08:02,124 INFO [trainer.py:765] (3/8) Epoch 1, batch 100, train_loss[loss=4.321, ArTop10Accuracy=0.494, over 14253.00 frames. ], tot_loss[loss=5.055, ArTop10Accuracy=0.3723, over 4750.99 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,832 INFO [trainer.py:765] (3/8) Epoch 1, batch 200, train_loss[loss=4.028, ArTop10Accuracy=0.5454, over 13785.00 frames. ], tot_loss[loss=4.489, ArTop10Accuracy=0.4677, over 7752.98 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,433 INFO [trainer.py:765] (3/8) Epoch 1, batch 300, train_loss[loss=3.866, ArTop10Accuracy=0.5741, over 14454.00 frames. ], tot_loss[loss=4.214, ArTop10Accuracy=0.5139, over 9356.97 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,702 INFO [trainer.py:765] (3/8) Epoch 1, batch 400, train_loss[loss=3.711, ArTop10Accuracy=0.6045, over 10398.00 frames. ], tot_loss[loss=4.026, ArTop10Accuracy=0.5457, over 10273.01 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 08:13:40,054 INFO [trainer.py:765] (3/8) Epoch 1, batch 500, train_loss[loss=3.637, ArTop10Accuracy=0.6122, over 12705.00 frames. ], tot_loss[loss=3.879, ArTop10Accuracy=0.5712, over 10836.94 frames. ], batch size: 23, lr: 2.99e-02 +2024-08-06 08:15:00,247 INFO [trainer.py:765] (3/8) Epoch 1, batch 600, train_loss[loss=3.602, ArTop10Accuracy=0.6212, over 11367.00 frames. ], tot_loss[loss=3.767, ArTop10Accuracy=0.5912, over 11362.36 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,429 INFO [trainer.py:765] (3/8) Epoch 1, batch 700, train_loss[loss=3.538, ArTop10Accuracy=0.633, over 10170.00 frames. ], tot_loss[loss=3.684, ArTop10Accuracy=0.6062, over 11494.64 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,022 INFO [trainer.py:765] (3/8) Epoch 1, batch 800, train_loss[loss=3.434, ArTop10Accuracy=0.6542, over 10245.00 frames. ], tot_loss[loss=3.624, ArTop10Accuracy=0.617, over 11656.86 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 08:18:56,155 INFO [trainer.py:765] (3/8) Epoch 1, batch 900, train_loss[loss=3.534, ArTop10Accuracy=0.6336, over 12966.00 frames. ], tot_loss[loss=3.566, ArTop10Accuracy=0.6278, over 11705.99 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,867 INFO [trainer.py:765] (3/8) Epoch 1, batch 1000, train_loss[loss=3.492, ArTop10Accuracy=0.6432, over 12921.00 frames. ], tot_loss[loss=3.524, ArTop10Accuracy=0.6352, over 11908.83 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,547 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,161 INFO [trainer.py:765] (3/8) Epoch 1, batch 1100, train_loss[loss=3.45, ArTop10Accuracy=0.6492, over 13404.00 frames. ], tot_loss[loss=3.486, ArTop10Accuracy=0.6422, over 11968.58 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 08:22:45,417 INFO [trainer.py:765] (3/8) Epoch 1, batch 1200, train_loss[loss=3.485, ArTop10Accuracy=0.6411, over 12429.00 frames. ], tot_loss[loss=3.457, ArTop10Accuracy=0.6477, over 11881.54 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,270 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:25:36,244 INFO [trainer.py:765] (3/8) Epoch 2, batch 100, train_loss[loss=3.434, ArTop10Accuracy=0.6529, over 14478.00 frames. ], tot_loss[loss=3.425, ArTop10Accuracy=0.6525, over 4759.18 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,962 INFO [trainer.py:765] (3/8) Epoch 2, batch 200, train_loss[loss=3.295, ArTop10Accuracy=0.6847, over 13569.00 frames. ], tot_loss[loss=3.39, ArTop10Accuracy=0.6593, over 7756.33 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,540 INFO [trainer.py:765] (3/8) Epoch 2, batch 300, train_loss[loss=3.367, ArTop10Accuracy=0.6621, over 14055.00 frames. ], tot_loss[loss=3.367, ArTop10Accuracy=0.6633, over 9383.94 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,643 INFO [trainer.py:765] (3/8) Epoch 2, batch 400, train_loss[loss=3.229, ArTop10Accuracy=0.6936, over 10959.00 frames. ], tot_loss[loss=3.356, ArTop10Accuracy=0.6656, over 10302.25 frames. ], batch size: 15, lr: 2.88e-02 +2024-08-06 08:31:22,908 INFO [trainer.py:765] (3/8) Epoch 2, batch 500, train_loss[loss=3.37, ArTop10Accuracy=0.6632, over 12750.00 frames. ], tot_loss[loss=3.343, ArTop10Accuracy=0.668, over 10853.14 frames. ], batch size: 23, lr: 2.87e-02 +2024-08-06 08:32:45,694 INFO [trainer.py:765] (3/8) Epoch 2, batch 600, train_loss[loss=3.274, ArTop10Accuracy=0.684, over 11472.00 frames. ], tot_loss[loss=3.332, ArTop10Accuracy=0.6701, over 11378.49 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,589 INFO [trainer.py:765] (3/8) Epoch 2, batch 700, train_loss[loss=3.395, ArTop10Accuracy=0.6511, over 9318.00 frames. ], tot_loss[loss=3.328, ArTop10Accuracy=0.671, over 11516.79 frames. ], batch size: 11, lr: 2.85e-02 +2024-08-06 08:34:31,181 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:34:40,888 INFO [trainer.py:811] (3/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,889 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29540MB +2024-08-06 08:34:41,706 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,883 INFO [trainer.py:765] (3/8) Epoch 2, batch 800, train_loss[loss=3.373, ArTop10Accuracy=0.6659, over 9150.00 frames. ], tot_loss[loss=3.325, ArTop10Accuracy=0.6717, over 11642.86 frames. ], batch size: 11, lr: 2.84e-02 +2024-08-06 08:36:56,377 INFO [trainer.py:765] (3/8) Epoch 2, batch 900, train_loss[loss=3.308, ArTop10Accuracy=0.6785, over 12996.00 frames. ], tot_loss[loss=3.31, ArTop10Accuracy=0.6747, over 11701.06 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,517 INFO [trainer.py:765] (3/8) Epoch 2, batch 1000, train_loss[loss=3.3, ArTop10Accuracy=0.6792, over 12924.00 frames. ], tot_loss[loss=3.303, ArTop10Accuracy=0.6759, over 11883.62 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,066 INFO [trainer.py:765] (3/8) Epoch 2, batch 1100, train_loss[loss=3.255, ArTop10Accuracy=0.6854, over 13749.00 frames. ], tot_loss[loss=3.294, ArTop10Accuracy=0.6775, over 11961.15 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,226 INFO [trainer.py:765] (3/8) Epoch 2, batch 1200, train_loss[loss=3.319, ArTop10Accuracy=0.6732, over 12072.00 frames. ], tot_loss[loss=3.284, ArTop10Accuracy=0.6796, over 11891.67 frames. ], batch size: 101, lr: 2.80e-02 +2024-08-06 08:41:38,664 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:43:36,656 INFO [trainer.py:765] (3/8) Epoch 3, batch 100, train_loss[loss=3.244, ArTop10Accuracy=0.6856, over 14499.00 frames. ], tot_loss[loss=3.254, ArTop10Accuracy=0.6846, over 4767.19 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,506 INFO [trainer.py:765] (3/8) Epoch 3, batch 200, train_loss[loss=3.139, ArTop10Accuracy=0.7055, over 13473.00 frames. ], tot_loss[loss=3.23, ArTop10Accuracy=0.6888, over 7744.68 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,264 INFO [trainer.py:765] (3/8) Epoch 3, batch 300, train_loss[loss=3.263, ArTop10Accuracy=0.6859, over 14445.00 frames. ], tot_loss[loss=3.209, ArTop10Accuracy=0.6931, over 9375.03 frames. ], batch size: 45, lr: 2.64e-02 +2024-08-06 08:48:04,225 INFO [trainer.py:765] (3/8) Epoch 3, batch 400, train_loss[loss=3.121, ArTop10Accuracy=0.7107, over 10851.00 frames. ], tot_loss[loss=3.191, ArTop10Accuracy=0.6967, over 10285.27 frames. ], batch size: 15, lr: 2.63e-02 +2024-08-06 08:48:40,887 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,547 INFO [trainer.py:765] (3/8) Epoch 3, batch 500, train_loss[loss=3.155, ArTop10Accuracy=0.709, over 12711.00 frames. ], tot_loss[loss=3.174, ArTop10Accuracy=0.7001, over 10844.23 frames. ], batch size: 23, lr: 2.62e-02 +2024-08-06 08:51:00,483 INFO [trainer.py:765] (3/8) Epoch 3, batch 600, train_loss[loss=3.06, ArTop10Accuracy=0.725, over 11397.00 frames. ], tot_loss[loss=3.157, ArTop10Accuracy=0.7033, over 11347.80 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,624 INFO [trainer.py:765] (3/8) Epoch 3, batch 700, train_loss[loss=3.042, ArTop10Accuracy=0.7264, over 10038.00 frames. ], tot_loss[loss=3.145, ArTop10Accuracy=0.7056, over 11508.63 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 08:53:57,394 INFO [trainer.py:765] (3/8) Epoch 3, batch 800, train_loss[loss=3.221, ArTop10Accuracy=0.6856, over 10071.00 frames. ], tot_loss[loss=3.138, ArTop10Accuracy=0.7071, over 11643.06 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 08:55:15,124 INFO [trainer.py:765] (3/8) Epoch 3, batch 900, train_loss[loss=3.071, ArTop10Accuracy=0.7171, over 12846.00 frames. ], tot_loss[loss=3.117, ArTop10Accuracy=0.711, over 11684.97 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,563 INFO [trainer.py:765] (3/8) Epoch 3, batch 1000, train_loss[loss=3.188, ArTop10Accuracy=0.6936, over 12897.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.7118, over 11865.36 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,512 INFO [trainer.py:765] (3/8) Epoch 3, batch 1100, train_loss[loss=3.058, ArTop10Accuracy=0.7222, over 13737.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.7131, over 11941.48 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,405 INFO [trainer.py:765] (3/8) Epoch 3, batch 1200, train_loss[loss=3.175, ArTop10Accuracy=0.7042, over 12597.00 frames. ], tot_loss[loss=3.095, ArTop10Accuracy=0.7152, over 11835.39 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:02,031 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:01:50,747 INFO [trainer.py:765] (3/8) Epoch 4, batch 100, train_loss[loss=3.104, ArTop10Accuracy=0.7088, over 14130.00 frames. ], tot_loss[loss=3.069, ArTop10Accuracy=0.7194, over 4744.71 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,864 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,385 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29540MB +2024-08-06 09:03:03,370 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,279 INFO [trainer.py:765] (3/8) Epoch 4, batch 200, train_loss[loss=3.02, ArTop10Accuracy=0.7308, over 13695.00 frames. ], tot_loss[loss=3.044, ArTop10Accuracy=0.7244, over 7751.43 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,738 INFO [trainer.py:765] (3/8) Epoch 4, batch 300, train_loss[loss=3.065, ArTop10Accuracy=0.7227, over 14499.00 frames. ], tot_loss[loss=3.04, ArTop10Accuracy=0.7256, over 9375.57 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 09:06:28,156 INFO [trainer.py:765] (3/8) Epoch 4, batch 400, train_loss[loss=2.988, ArTop10Accuracy=0.7383, over 10809.00 frames. ], tot_loss[loss=3.03, ArTop10Accuracy=0.7275, over 10281.95 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 09:08:01,930 INFO [trainer.py:765] (3/8) Epoch 4, batch 500, train_loss[loss=2.973, ArTop10Accuracy=0.7446, over 12387.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.729, over 10821.67 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,547 INFO [trainer.py:765] (3/8) Epoch 4, batch 600, train_loss[loss=2.865, ArTop10Accuracy=0.7627, over 11403.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7295, over 11351.68 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,872 INFO [trainer.py:765] (3/8) Epoch 4, batch 700, train_loss[loss=2.839, ArTop10Accuracy=0.765, over 9333.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.7291, over 11490.84 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 09:12:17,519 INFO [trainer.py:765] (3/8) Epoch 4, batch 800, train_loss[loss=2.979, ArTop10Accuracy=0.7389, over 9318.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7293, over 11615.33 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 09:13:33,219 INFO [trainer.py:765] (3/8) Epoch 4, batch 900, train_loss[loss=2.958, ArTop10Accuracy=0.7471, over 12633.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7311, over 11664.01 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,526 INFO [trainer.py:765] (3/8) Epoch 4, batch 1000, train_loss[loss=2.934, ArTop10Accuracy=0.7402, over 12672.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7307, over 11855.93 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,988 INFO [trainer.py:765] (3/8) Epoch 4, batch 1100, train_loss[loss=3.039, ArTop10Accuracy=0.7209, over 13689.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7302, over 11936.22 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,297 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,350 INFO [trainer.py:765] (3/8) Epoch 4, batch 1200, train_loss[loss=3.08, ArTop10Accuracy=0.7212, over 13128.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7306, over 11871.12 frames. ], batch size: 103, lr: 2.25e-02 +2024-08-06 09:18:17,420 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:20:17,177 INFO [trainer.py:765] (3/8) Epoch 5, batch 100, train_loss[loss=3.008, ArTop10Accuracy=0.7247, over 14334.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7348, over 4769.18 frames. 
], batch size: 63, lr: 2.10e-02 +2024-08-06 09:21:52,302 INFO [trainer.py:765] (3/8) Epoch 5, batch 200, train_loss[loss=3.035, ArTop10Accuracy=0.7238, over 13557.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.7363, over 7749.95 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,247 INFO [trainer.py:765] (3/8) Epoch 5, batch 300, train_loss[loss=3.01, ArTop10Accuracy=0.7287, over 14130.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7386, over 9362.27 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,543 INFO [trainer.py:765] (3/8) Epoch 5, batch 400, train_loss[loss=2.831, ArTop10Accuracy=0.7688, over 10419.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7389, over 10263.43 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,424 INFO [trainer.py:765] (3/8) Epoch 5, batch 500, train_loss[loss=2.948, ArTop10Accuracy=0.7401, over 12189.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7394, over 10831.03 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,543 INFO [trainer.py:765] (3/8) Epoch 5, batch 600, train_loss[loss=3.025, ArTop10Accuracy=0.7193, over 11202.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7395, over 11339.96 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,676 INFO [trainer.py:765] (3/8) Epoch 5, batch 700, train_loss[loss=2.829, ArTop10Accuracy=0.7669, over 9447.00 frames. ], tot_loss[loss=2.965, ArTop10Accuracy=0.7396, over 11490.41 frames. ], batch size: 11, lr: 2.04e-02 +2024-08-06 09:30:44,699 INFO [trainer.py:765] (3/8) Epoch 5, batch 800, train_loss[loss=2.886, ArTop10Accuracy=0.7585, over 9441.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7388, over 11618.93 frames. ], batch size: 11, lr: 2.03e-02 +2024-08-06 09:31:51,245 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:32:00,762 INFO [trainer.py:811] (3/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,763 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29540MB +2024-08-06 09:32:01,714 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,560 INFO [trainer.py:765] (3/8) Epoch 5, batch 900, train_loss[loss=2.864, ArTop10Accuracy=0.758, over 12900.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.741, over 11669.20 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,329 INFO [trainer.py:765] (3/8) Epoch 5, batch 1000, train_loss[loss=2.95, ArTop10Accuracy=0.7414, over 12960.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7401, over 11887.77 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,307 INFO [trainer.py:765] (3/8) Epoch 5, batch 1100, train_loss[loss=2.925, ArTop10Accuracy=0.7515, over 13506.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7391, over 11948.88 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,338 INFO [trainer.py:765] (3/8) Epoch 5, batch 1200, train_loss[loss=2.999, ArTop10Accuracy=0.7334, over 12924.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7391, over 11862.59 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,670 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:38:52,670 INFO [trainer.py:765] (3/8) Epoch 6, batch 100, train_loss[loss=3, ArTop10Accuracy=0.7349, over 14295.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7423, over 4739.69 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,840 INFO [trainer.py:765] (3/8) Epoch 6, batch 200, train_loss[loss=2.892, ArTop10Accuracy=0.7548, over 13599.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7444, over 7746.65 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,970 INFO [trainer.py:765] (3/8) Epoch 6, batch 300, train_loss[loss=3.024, ArTop10Accuracy=0.7262, over 14010.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.746, over 9367.16 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 09:43:17,833 INFO [trainer.py:765] (3/8) Epoch 6, batch 400, train_loss[loss=2.828, ArTop10Accuracy=0.7645, over 10341.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7468, over 10282.94 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,133 INFO [trainer.py:765] (3/8) Epoch 6, batch 500, train_loss[loss=2.917, ArTop10Accuracy=0.7487, over 12615.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7477, over 10844.16 frames. ], batch size: 23, lr: 1.82e-02 +2024-08-06 09:46:22,878 INFO [trainer.py:765] (3/8) Epoch 6, batch 600, train_loss[loss=2.954, ArTop10Accuracy=0.7389, over 11496.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7478, over 11382.94 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,225 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,876 INFO [trainer.py:765] (3/8) Epoch 6, batch 700, train_loss[loss=2.81, ArTop10Accuracy=0.7641, over 9426.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7465, over 11545.50 frames. ], batch size: 11, lr: 1.80e-02 +2024-08-06 09:49:15,960 INFO [trainer.py:765] (3/8) Epoch 6, batch 800, train_loss[loss=2.978, ArTop10Accuracy=0.738, over 10194.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7462, over 11653.83 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,141 INFO [trainer.py:765] (3/8) Epoch 6, batch 900, train_loss[loss=2.958, ArTop10Accuracy=0.7424, over 12861.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7476, over 11703.21 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,305 INFO [trainer.py:765] (3/8) Epoch 6, batch 1000, train_loss[loss=2.948, ArTop10Accuracy=0.7387, over 12723.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7476, over 11897.90 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,926 INFO [trainer.py:765] (3/8) Epoch 6, batch 1100, train_loss[loss=2.986, ArTop10Accuracy=0.7328, over 13701.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7464, over 11964.42 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,342 INFO [trainer.py:765] (3/8) Epoch 6, batch 1200, train_loss[loss=3.044, ArTop10Accuracy=0.7283, over 12891.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7465, over 11877.83 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,384 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:57:06,705 INFO [trainer.py:765] (3/8) Epoch 7, batch 100, train_loss[loss=2.902, ArTop10Accuracy=0.7499, over 14643.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7485, over 4780.37 frames. ], batch size: 63, lr: 1.64e-02 +2024-08-06 09:58:39,432 INFO [trainer.py:765] (3/8) Epoch 7, batch 200, train_loss[loss=2.904, ArTop10Accuracy=0.7525, over 13992.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7503, over 7757.95 frames. 
], batch size: 35, lr: 1.64e-02 +2024-08-06 10:00:06,089 INFO [trainer.py:765] (3/8) Epoch 7, batch 300, train_loss[loss=2.937, ArTop10Accuracy=0.7427, over 14214.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7509, over 9379.79 frames. ], batch size: 45, lr: 1.63e-02 +2024-08-06 10:00:40,516 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30046MB +2024-08-06 10:00:50,983 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,122 INFO [trainer.py:765] (3/8) Epoch 7, batch 400, train_loss[loss=2.901, ArTop10Accuracy=0.7524, over 10989.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7526, over 10298.60 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 10:03:21,465 INFO [trainer.py:765] (3/8) Epoch 7, batch 500, train_loss[loss=2.942, ArTop10Accuracy=0.7404, over 12828.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7535, over 10868.22 frames. ], batch size: 23, lr: 1.61e-02 +2024-08-06 10:04:51,889 INFO [trainer.py:765] (3/8) Epoch 7, batch 600, train_loss[loss=2.859, ArTop10Accuracy=0.7626, over 11541.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7528, over 11398.95 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 10:06:25,118 INFO [trainer.py:765] (3/8) Epoch 7, batch 700, train_loss[loss=2.801, ArTop10Accuracy=0.7696, over 10050.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7524, over 11508.96 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,954 INFO [trainer.py:765] (3/8) Epoch 7, batch 800, train_loss[loss=2.846, ArTop10Accuracy=0.7637, over 10302.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7525, over 11633.29 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,830 INFO [trainer.py:765] (3/8) Epoch 7, batch 900, train_loss[loss=2.874, ArTop10Accuracy=0.7574, over 12936.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7541, over 11680.46 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,642 INFO [trainer.py:765] (3/8) Epoch 7, batch 1000, train_loss[loss=2.844, ArTop10Accuracy=0.7628, over 12774.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7524, over 11883.71 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,215 INFO [trainer.py:765] (3/8) Epoch 7, batch 1100, train_loss[loss=2.865, ArTop10Accuracy=0.7577, over 13803.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7517, over 11945.40 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,210 INFO [trainer.py:765] (3/8) Epoch 7, batch 1200, train_loss[loss=2.986, ArTop10Accuracy=0.7322, over 12135.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7522, over 11873.39 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,761 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:15:03,607 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,826 INFO [trainer.py:765] (3/8) Epoch 8, batch 100, train_loss[loss=2.98, ArTop10Accuracy=0.7361, over 14430.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7539, over 4774.76 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,867 INFO [trainer.py:765] (3/8) Epoch 8, batch 200, train_loss[loss=2.818, ArTop10Accuracy=0.7666, over 13695.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.7558, over 7759.07 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,904 INFO [trainer.py:765] (3/8) Epoch 8, batch 300, train_loss[loss=2.93, ArTop10Accuracy=0.7451, over 14208.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7573, over 9366.52 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,350 INFO [trainer.py:765] (3/8) Epoch 8, batch 400, train_loss[loss=2.762, ArTop10Accuracy=0.7819, over 10890.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7582, over 10274.78 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 10:21:32,417 INFO [trainer.py:765] (3/8) Epoch 8, batch 500, train_loss[loss=2.789, ArTop10Accuracy=0.7733, over 12693.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7593, over 10844.23 frames. ], batch size: 23, lr: 1.45e-02 +2024-08-06 10:23:00,980 INFO [trainer.py:765] (3/8) Epoch 8, batch 600, train_loss[loss=2.75, ArTop10Accuracy=0.779, over 11301.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7585, over 11361.98 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,793 INFO [trainer.py:765] (3/8) Epoch 8, batch 700, train_loss[loss=2.707, ArTop10Accuracy=0.7901, over 10263.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7573, over 11497.41 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:25:56,091 INFO [trainer.py:765] (3/8) Epoch 8, batch 800, train_loss[loss=2.766, ArTop10Accuracy=0.7851, over 9222.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7566, over 11602.97 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:27:12,251 INFO [trainer.py:765] (3/8) Epoch 8, batch 900, train_loss[loss=2.853, ArTop10Accuracy=0.7619, over 12891.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7586, over 11666.59 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:28:25,269 INFO [trainer.py:765] (3/8) Epoch 8, batch 1000, train_loss[loss=2.875, ArTop10Accuracy=0.7523, over 12798.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7572, over 11869.24 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,161 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:29:16,830 INFO [trainer.py:811] (3/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 10:29:17,497 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,737 INFO [trainer.py:765] (3/8) Epoch 8, batch 1100, train_loss[loss=2.957, ArTop10Accuracy=0.737, over 13581.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7556, over 11946.69 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,952 INFO [trainer.py:765] (3/8) Epoch 8, batch 1200, train_loss[loss=2.957, ArTop10Accuracy=0.7357, over 12132.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7559, over 11833.29 frames. ], batch size: 101, lr: 1.40e-02 +2024-08-06 10:32:05,668 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:34:01,262 INFO [trainer.py:765] (3/8) Epoch 9, batch 100, train_loss[loss=2.938, ArTop10Accuracy=0.7428, over 14349.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7592, over 4768.86 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,778 INFO [trainer.py:765] (3/8) Epoch 9, batch 200, train_loss[loss=2.876, ArTop10Accuracy=0.7568, over 13407.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7602, over 7749.90 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,933 INFO [trainer.py:765] (3/8) Epoch 9, batch 300, train_loss[loss=2.883, ArTop10Accuracy=0.7558, over 14283.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7606, over 9375.68 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,703 INFO [trainer.py:765] (3/8) Epoch 9, batch 400, train_loss[loss=2.786, ArTop10Accuracy=0.7768, over 10629.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7612, over 10299.19 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,262 INFO [trainer.py:765] (3/8) Epoch 9, batch 500, train_loss[loss=2.803, ArTop10Accuracy=0.7703, over 12306.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7624, over 10831.99 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,696 INFO [trainer.py:765] (3/8) Epoch 9, batch 600, train_loss[loss=2.849, ArTop10Accuracy=0.7581, over 11571.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7621, over 11341.92 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,446 INFO [trainer.py:765] (3/8) Epoch 9, batch 700, train_loss[loss=2.93, ArTop10Accuracy=0.7417, over 9555.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7619, over 11496.78 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:44:02,958 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,675 INFO [trainer.py:765] (3/8) Epoch 9, batch 800, train_loss[loss=2.76, ArTop10Accuracy=0.7781, over 10053.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7607, over 11611.66 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:45:35,725 INFO [trainer.py:765] (3/8) Epoch 9, batch 900, train_loss[loss=2.775, ArTop10Accuracy=0.7765, over 13281.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7624, over 11670.68 frames. ], batch size: 28, lr: 1.28e-02 +2024-08-06 10:46:51,277 INFO [trainer.py:765] (3/8) Epoch 9, batch 1000, train_loss[loss=2.849, ArTop10Accuracy=0.7631, over 12930.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7619, over 11874.30 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,252 INFO [trainer.py:765] (3/8) Epoch 9, batch 1100, train_loss[loss=2.854, ArTop10Accuracy=0.7604, over 13707.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7606, over 11943.12 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,059 INFO [trainer.py:765] (3/8) Epoch 9, batch 1200, train_loss[loss=2.971, ArTop10Accuracy=0.7419, over 12234.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.76, over 11871.25 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:21,935 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:52:12,332 INFO [trainer.py:765] (3/8) Epoch 10, batch 100, train_loss[loss=2.91, ArTop10Accuracy=0.7482, over 14496.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7617, over 4757.99 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,592 INFO [trainer.py:765] (3/8) Epoch 10, batch 200, train_loss[loss=2.72, ArTop10Accuracy=0.79, over 13527.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7645, over 7751.44 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,096 INFO [trainer.py:765] (3/8) Epoch 10, batch 300, train_loss[loss=2.869, ArTop10Accuracy=0.757, over 14196.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7654, over 9370.30 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,182 INFO [trainer.py:765] (3/8) Epoch 10, batch 400, train_loss[loss=2.822, ArTop10Accuracy=0.7626, over 10914.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7661, over 10267.64 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 10:58:04,944 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:58:14,559 INFO [trainer.py:811] (3/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,560 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 10:58:15,578 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,585 INFO [trainer.py:765] (3/8) Epoch 10, batch 500, train_loss[loss=2.765, ArTop10Accuracy=0.7807, over 12336.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7668, over 10858.82 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,820 INFO [trainer.py:765] (3/8) Epoch 10, batch 600, train_loss[loss=2.878, ArTop10Accuracy=0.7531, over 11310.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7663, over 11375.27 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,113 INFO [trainer.py:765] (3/8) Epoch 10, batch 700, train_loss[loss=2.746, ArTop10Accuracy=0.7848, over 9285.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7654, over 11525.92 frames. ], batch size: 11, lr: 1.18e-02 +2024-08-06 11:02:36,923 INFO [trainer.py:765] (3/8) Epoch 10, batch 800, train_loss[loss=2.666, ArTop10Accuracy=0.7947, over 10209.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7651, over 11652.28 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 11:03:51,217 INFO [trainer.py:765] (3/8) Epoch 10, batch 900, train_loss[loss=2.843, ArTop10Accuracy=0.764, over 12903.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.766, over 11698.22 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,357 INFO [trainer.py:765] (3/8) Epoch 10, batch 1000, train_loss[loss=2.825, ArTop10Accuracy=0.7681, over 12927.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7648, over 11889.20 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:06:21,727 INFO [trainer.py:765] (3/8) Epoch 10, batch 1100, train_loss[loss=2.881, ArTop10Accuracy=0.7536, over 13725.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7633, over 11982.89 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,778 INFO [trainer.py:765] (3/8) Epoch 10, batch 1200, train_loss[loss=2.931, ArTop10Accuracy=0.7462, over 12060.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7637, over 11894.39 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,651 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:10:29,961 INFO [trainer.py:765] (3/8) Epoch 11, batch 100, train_loss[loss=2.872, ArTop10Accuracy=0.7564, over 14637.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7654, over 4739.55 frames. ], batch size: 63, lr: 1.10e-02 +2024-08-06 11:12:04,681 INFO [trainer.py:765] (3/8) Epoch 11, batch 200, train_loss[loss=2.874, ArTop10Accuracy=0.755, over 13779.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.767, over 7746.84 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 11:12:22,831 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,554 INFO [trainer.py:765] (3/8) Epoch 11, batch 300, train_loss[loss=2.886, ArTop10Accuracy=0.7523, over 14241.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7679, over 9372.27 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,275 INFO [trainer.py:765] (3/8) Epoch 11, batch 400, train_loss[loss=2.797, ArTop10Accuracy=0.7679, over 10716.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7686, over 10280.11 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 11:16:29,643 INFO [trainer.py:765] (3/8) Epoch 11, batch 500, train_loss[loss=2.834, ArTop10Accuracy=0.7655, over 12348.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7691, over 10847.00 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,524 INFO [trainer.py:765] (3/8) Epoch 11, batch 600, train_loss[loss=2.729, ArTop10Accuracy=0.7832, over 11418.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7687, over 11384.89 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,519 INFO [trainer.py:765] (3/8) Epoch 11, batch 700, train_loss[loss=2.69, ArTop10Accuracy=0.7945, over 10086.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7676, over 11518.99 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 11:20:55,489 INFO [trainer.py:765] (3/8) Epoch 11, batch 800, train_loss[loss=2.668, ArTop10Accuracy=0.7995, over 10152.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7676, over 11650.91 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,711 INFO [trainer.py:765] (3/8) Epoch 11, batch 900, train_loss[loss=2.8, ArTop10Accuracy=0.7695, over 12996.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7678, over 11706.55 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,804 INFO [trainer.py:765] (3/8) Epoch 11, batch 1000, train_loss[loss=2.759, ArTop10Accuracy=0.7778, over 12738.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7677, over 11899.04 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,908 INFO [trainer.py:765] (3/8) Epoch 11, batch 1100, train_loss[loss=2.76, ArTop10Accuracy=0.7812, over 13725.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7667, over 11968.01 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,739 INFO [trainer.py:765] (3/8) Epoch 11, batch 1200, train_loss[loss=2.928, ArTop10Accuracy=0.7442, over 12543.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7664, over 11889.44 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,853 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,556 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 11:26:26,191 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,618 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:29:03,457 INFO [trainer.py:765] (3/8) Epoch 12, batch 100, train_loss[loss=2.846, ArTop10Accuracy=0.7631, over 14118.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7687, over 4773.56 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,680 INFO [trainer.py:765] (3/8) Epoch 12, batch 200, train_loss[loss=2.78, ArTop10Accuracy=0.7769, over 13701.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7701, over 7763.97 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,661 INFO [trainer.py:765] (3/8) Epoch 12, batch 300, train_loss[loss=2.809, ArTop10Accuracy=0.7704, over 14559.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7712, over 9355.86 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 11:33:30,744 INFO [trainer.py:765] (3/8) Epoch 12, batch 400, train_loss[loss=2.739, ArTop10Accuracy=0.7841, over 10419.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7719, over 10277.50 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,739 INFO [trainer.py:765] (3/8) Epoch 12, batch 500, train_loss[loss=2.723, ArTop10Accuracy=0.7865, over 12318.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7731, over 10829.72 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,367 INFO [trainer.py:765] (3/8) Epoch 12, batch 600, train_loss[loss=2.752, ArTop10Accuracy=0.7847, over 12060.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7728, over 11358.64 frames. ], batch size: 19, lr: 9.97e-03 +2024-08-06 11:38:00,349 INFO [trainer.py:765] (3/8) Epoch 12, batch 700, train_loss[loss=2.783, ArTop10Accuracy=0.7784, over 10062.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.772, over 11511.89 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,617 INFO [trainer.py:765] (3/8) Epoch 12, batch 800, train_loss[loss=2.752, ArTop10Accuracy=0.785, over 9558.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7708, over 11639.07 frames. ], batch size: 11, lr: 9.90e-03 +2024-08-06 11:40:39,895 INFO [trainer.py:765] (3/8) Epoch 12, batch 900, train_loss[loss=2.831, ArTop10Accuracy=0.7677, over 12933.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7714, over 11694.97 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:14,001 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,195 INFO [trainer.py:765] (3/8) Epoch 12, batch 1000, train_loss[loss=2.767, ArTop10Accuracy=0.7761, over 12909.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7708, over 11880.84 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,326 INFO [trainer.py:765] (3/8) Epoch 12, batch 1100, train_loss[loss=2.787, ArTop10Accuracy=0.7675, over 13797.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.77, over 11937.72 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,162 INFO [trainer.py:765] (3/8) Epoch 12, batch 1200, train_loss[loss=2.935, ArTop10Accuracy=0.7424, over 12132.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7701, over 11867.60 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,537 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:47:26,606 INFO [trainer.py:765] (3/8) Epoch 13, batch 100, train_loss[loss=2.859, ArTop10Accuracy=0.7575, over 14892.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7707, over 4740.93 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,785 INFO [trainer.py:765] (3/8) Epoch 13, batch 200, train_loss[loss=2.754, ArTop10Accuracy=0.7835, over 14028.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7729, over 7741.31 frames. 
], batch size: 35, lr: 9.34e-03 +2024-08-06 11:50:20,521 INFO [trainer.py:765] (3/8) Epoch 13, batch 300, train_loss[loss=2.751, ArTop10Accuracy=0.7841, over 14265.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7751, over 9363.54 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,772 INFO [trainer.py:765] (3/8) Epoch 13, batch 400, train_loss[loss=2.735, ArTop10Accuracy=0.7855, over 10485.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.775, over 10270.67 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,413 INFO [trainer.py:765] (3/8) Epoch 13, batch 500, train_loss[loss=2.73, ArTop10Accuracy=0.7846, over 12183.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7753, over 10846.70 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,229 INFO [trainer.py:765] (3/8) Epoch 13, batch 600, train_loss[loss=2.769, ArTop10Accuracy=0.7767, over 11760.00 frames. ], tot_loss[loss=2.775, ArTop10Accuracy=0.7753, over 11382.98 frames. ], batch size: 19, lr: 9.23e-03 +2024-08-06 11:55:47,087 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:55:56,835 INFO [trainer.py:811] (3/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 11:55:57,718 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,472 INFO [trainer.py:765] (3/8) Epoch 13, batch 700, train_loss[loss=2.726, ArTop10Accuracy=0.7868, over 10173.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7747, over 11525.72 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 11:57:46,689 INFO [trainer.py:765] (3/8) Epoch 13, batch 800, train_loss[loss=2.736, ArTop10Accuracy=0.7815, over 10086.00 frames. ], tot_loss[loss=2.784, ArTop10Accuracy=0.7734, over 11612.96 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,292 INFO [trainer.py:765] (3/8) Epoch 13, batch 900, train_loss[loss=2.768, ArTop10Accuracy=0.7799, over 12903.00 frames. ], tot_loss[loss=2.782, ArTop10Accuracy=0.7737, over 11684.55 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,180 INFO [trainer.py:765] (3/8) Epoch 13, batch 1000, train_loss[loss=2.768, ArTop10Accuracy=0.7764, over 12711.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.773, over 11887.42 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,887 INFO [trainer.py:765] (3/8) Epoch 13, batch 1100, train_loss[loss=2.819, ArTop10Accuracy=0.7671, over 13707.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.772, over 11953.14 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,671 INFO [trainer.py:765] (3/8) Epoch 13, batch 1200, train_loss[loss=2.951, ArTop10Accuracy=0.7404, over 11715.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7714, over 11872.35 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,181 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:05:45,340 INFO [trainer.py:765] (3/8) Epoch 14, batch 100, train_loss[loss=2.837, ArTop10Accuracy=0.7671, over 14409.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7742, over 4764.27 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,610 INFO [trainer.py:765] (3/8) Epoch 14, batch 200, train_loss[loss=2.752, ArTop10Accuracy=0.7797, over 13509.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7761, over 7760.98 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,317 INFO [trainer.py:765] (3/8) Epoch 14, batch 300, train_loss[loss=2.763, ArTop10Accuracy=0.7764, over 14157.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7774, over 9387.83 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,137 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,233 INFO [trainer.py:765] (3/8) Epoch 14, batch 400, train_loss[loss=2.688, ArTop10Accuracy=0.7928, over 10413.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 10296.32 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 12:11:36,157 INFO [trainer.py:765] (3/8) Epoch 14, batch 500, train_loss[loss=2.743, ArTop10Accuracy=0.7808, over 12153.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 10850.15 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,999 INFO [trainer.py:765] (3/8) Epoch 14, batch 600, train_loss[loss=2.798, ArTop10Accuracy=0.7717, over 11496.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7775, over 11365.20 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,559 INFO [trainer.py:765] (3/8) Epoch 14, batch 700, train_loss[loss=2.733, ArTop10Accuracy=0.7762, over 10191.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7765, over 11522.21 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 12:15:58,076 INFO [trainer.py:765] (3/8) Epoch 14, batch 800, train_loss[loss=2.694, ArTop10Accuracy=0.793, over 9315.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7762, over 11630.20 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 12:17:12,872 INFO [trainer.py:765] (3/8) Epoch 14, batch 900, train_loss[loss=2.755, ArTop10Accuracy=0.7847, over 12834.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7768, over 11689.30 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,620 INFO [trainer.py:765] (3/8) Epoch 14, batch 1000, train_loss[loss=2.773, ArTop10Accuracy=0.7776, over 12876.00 frames. ], tot_loss[loss=2.772, ArTop10Accuracy=0.7755, over 11875.98 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,382 INFO [trainer.py:765] (3/8) Epoch 14, batch 1100, train_loss[loss=2.798, ArTop10Accuracy=0.7736, over 13728.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7738, over 11970.38 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,285 INFO [trainer.py:765] (3/8) Epoch 14, batch 1200, train_loss[loss=2.908, ArTop10Accuracy=0.7524, over 12462.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.7734, over 11864.36 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:58,393 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:23:51,968 INFO [trainer.py:765] (3/8) Epoch 15, batch 100, train_loss[loss=2.804, ArTop10Accuracy=0.7692, over 14889.00 frames. ], tot_loss[loss=2.761, ArTop10Accuracy=0.7771, over 4760.90 frames. ], batch size: 65, lr: 8.14e-03 +2024-08-06 12:24:00,605 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (3/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 12:24:11,100 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,995 INFO [trainer.py:765] (3/8) Epoch 15, batch 200, train_loss[loss=2.662, ArTop10Accuracy=0.7959, over 13275.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7792, over 7758.27 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,702 INFO [trainer.py:765] (3/8) Epoch 15, batch 300, train_loss[loss=2.777, ArTop10Accuracy=0.772, over 13758.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7796, over 9380.39 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,540 INFO [trainer.py:765] (3/8) Epoch 15, batch 400, train_loss[loss=2.664, ArTop10Accuracy=0.7956, over 10533.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7797, over 10288.58 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,038 INFO [trainer.py:765] (3/8) Epoch 15, batch 500, train_loss[loss=2.802, ArTop10Accuracy=0.7713, over 12066.00 frames. ], tot_loss[loss=2.749, ArTop10Accuracy=0.7799, over 10860.11 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,299 INFO [trainer.py:765] (3/8) Epoch 15, batch 600, train_loss[loss=2.704, ArTop10Accuracy=0.7892, over 11985.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7796, over 11365.41 frames. ], batch size: 19, lr: 8.03e-03 +2024-08-06 12:32:53,182 INFO [trainer.py:765] (3/8) Epoch 15, batch 700, train_loss[loss=2.61, ArTop10Accuracy=0.8052, over 10227.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7784, over 11530.22 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 12:34:18,261 INFO [trainer.py:765] (3/8) Epoch 15, batch 800, train_loss[loss=2.733, ArTop10Accuracy=0.7805, over 10134.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7775, over 11637.96 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 12:35:34,733 INFO [trainer.py:765] (3/8) Epoch 15, batch 900, train_loss[loss=2.748, ArTop10Accuracy=0.779, over 12927.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7785, over 11705.92 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,547 INFO [trainer.py:765] (3/8) Epoch 15, batch 1000, train_loss[loss=2.757, ArTop10Accuracy=0.7794, over 12849.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7777, over 11893.13 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,186 INFO [trainer.py:765] (3/8) Epoch 15, batch 1100, train_loss[loss=2.801, ArTop10Accuracy=0.7653, over 13638.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7765, over 11952.94 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,847 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,795 INFO [trainer.py:765] (3/8) Epoch 15, batch 1200, train_loss[loss=2.927, ArTop10Accuracy=0.7428, over 12492.00 frames. ], tot_loss[loss=2.768, ArTop10Accuracy=0.7763, over 11874.98 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,968 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:42:17,625 INFO [trainer.py:765] (3/8) Epoch 16, batch 100, train_loss[loss=2.855, ArTop10Accuracy=0.7584, over 14046.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7784, over 4773.43 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,570 INFO [trainer.py:765] (3/8) Epoch 16, batch 200, train_loss[loss=2.744, ArTop10Accuracy=0.7782, over 13509.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7811, over 7745.98 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,508 INFO [trainer.py:765] (3/8) Epoch 16, batch 300, train_loss[loss=2.82, ArTop10Accuracy=0.7672, over 14163.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7828, over 9367.42 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,215 INFO [trainer.py:765] (3/8) Epoch 16, batch 400, train_loss[loss=2.704, ArTop10Accuracy=0.7902, over 10095.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7819, over 10274.41 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,317 INFO [trainer.py:765] (3/8) Epoch 16, batch 500, train_loss[loss=2.739, ArTop10Accuracy=0.7879, over 12132.00 frames. ], tot_loss[loss=2.735, ArTop10Accuracy=0.7824, over 10839.76 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,648 INFO [trainer.py:765] (3/8) Epoch 16, batch 600, train_loss[loss=2.728, ArTop10Accuracy=0.7856, over 11466.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7816, over 11376.36 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,687 INFO [trainer.py:765] (3/8) Epoch 16, batch 700, train_loss[loss=2.741, ArTop10Accuracy=0.7755, over 10176.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7809, over 11521.68 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 12:52:43,507 INFO [trainer.py:765] (3/8) Epoch 16, batch 800, train_loss[loss=2.648, ArTop10Accuracy=0.8006, over 9210.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7801, over 11645.23 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,022 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:53:15,496 INFO [trainer.py:811] (3/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,497 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 12:53:16,192 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,486 INFO [trainer.py:765] (3/8) Epoch 16, batch 900, train_loss[loss=2.737, ArTop10Accuracy=0.7819, over 12945.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.781, over 11677.13 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,797 INFO [trainer.py:765] (3/8) Epoch 16, batch 1000, train_loss[loss=2.689, ArTop10Accuracy=0.7908, over 13293.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.78, over 11861.96 frames. ], batch size: 28, lr: 7.47e-03 +2024-08-06 12:56:33,168 INFO [trainer.py:765] (3/8) Epoch 16, batch 1100, train_loss[loss=2.79, ArTop10Accuracy=0.7754, over 13731.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7783, over 11946.49 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,491 INFO [trainer.py:765] (3/8) Epoch 16, batch 1200, train_loss[loss=2.893, ArTop10Accuracy=0.7484, over 12243.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7782, over 11839.50 frames. ], batch size: 101, lr: 7.44e-03 +2024-08-06 12:58:48,504 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:00:47,906 INFO [trainer.py:765] (3/8) Epoch 17, batch 100, train_loss[loss=2.789, ArTop10Accuracy=0.7714, over 14616.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7809, over 4765.07 frames. 
], batch size: 63, lr: 7.18e-03 +2024-08-06 13:02:19,309 INFO [trainer.py:765] (3/8) Epoch 17, batch 200, train_loss[loss=2.732, ArTop10Accuracy=0.7834, over 13602.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7823, over 7748.91 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,523 INFO [trainer.py:765] (3/8) Epoch 17, batch 300, train_loss[loss=2.795, ArTop10Accuracy=0.7725, over 13977.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 9359.46 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,766 INFO [trainer.py:765] (3/8) Epoch 17, batch 400, train_loss[loss=2.692, ArTop10Accuracy=0.7914, over 10851.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7836, over 10278.48 frames. ], batch size: 15, lr: 7.14e-03 +2024-08-06 13:06:47,027 INFO [trainer.py:765] (3/8) Epoch 17, batch 500, train_loss[loss=2.7, ArTop10Accuracy=0.7846, over 12618.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7847, over 10854.47 frames. ], batch size: 23, lr: 7.12e-03 +2024-08-06 13:07:39,884 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,694 INFO [trainer.py:765] (3/8) Epoch 17, batch 600, train_loss[loss=2.655, ArTop10Accuracy=0.8002, over 11547.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.784, over 11386.03 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,842 INFO [trainer.py:765] (3/8) Epoch 17, batch 700, train_loss[loss=2.522, ArTop10Accuracy=0.8214, over 10182.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 11523.38 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,487 INFO [trainer.py:765] (3/8) Epoch 17, batch 800, train_loss[loss=2.54, ArTop10Accuracy=0.8153, over 10068.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7831, over 11645.95 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 13:12:35,676 INFO [trainer.py:765] (3/8) Epoch 17, batch 900, train_loss[loss=2.767, ArTop10Accuracy=0.7784, over 12936.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7831, over 11694.16 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,067 INFO [trainer.py:765] (3/8) Epoch 17, batch 1000, train_loss[loss=2.722, ArTop10Accuracy=0.7889, over 12888.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.782, over 11891.73 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,490 INFO [trainer.py:765] (3/8) Epoch 17, batch 1100, train_loss[loss=2.697, ArTop10Accuracy=0.7897, over 13773.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7811, over 11951.73 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,395 INFO [trainer.py:765] (3/8) Epoch 17, batch 1200, train_loss[loss=2.828, ArTop10Accuracy=0.7645, over 12483.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7804, over 11856.50 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:21,345 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:19:16,000 INFO [trainer.py:765] (3/8) Epoch 18, batch 100, train_loss[loss=2.748, ArTop10Accuracy=0.7797, over 14040.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7818, over 4757.97 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,607 INFO [trainer.py:765] (3/8) Epoch 18, batch 200, train_loss[loss=2.765, ArTop10Accuracy=0.7724, over 13836.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7828, over 7748.93 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,111 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 13:22:05,480 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,587 INFO [trainer.py:765] (3/8) Epoch 18, batch 300, train_loss[loss=2.719, ArTop10Accuracy=0.7826, over 14019.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7851, over 9382.02 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,935 INFO [trainer.py:765] (3/8) Epoch 18, batch 400, train_loss[loss=2.703, ArTop10Accuracy=0.7932, over 10263.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7862, over 10301.86 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,019 INFO [trainer.py:765] (3/8) Epoch 18, batch 500, train_loss[loss=2.681, ArTop10Accuracy=0.7939, over 12159.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7859, over 10841.49 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,639 INFO [trainer.py:765] (3/8) Epoch 18, batch 600, train_loss[loss=2.633, ArTop10Accuracy=0.8064, over 11547.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.785, over 11359.83 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,588 INFO [trainer.py:765] (3/8) Epoch 18, batch 700, train_loss[loss=2.561, ArTop10Accuracy=0.814, over 10119.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7849, over 11503.18 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,991 INFO [trainer.py:765] (3/8) Epoch 18, batch 800, train_loss[loss=2.603, ArTop10Accuracy=0.8063, over 10134.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11650.29 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,525 INFO [trainer.py:765] (3/8) Epoch 18, batch 900, train_loss[loss=2.859, ArTop10Accuracy=0.7596, over 12912.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7852, over 11680.26 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,557 INFO [trainer.py:765] (3/8) Epoch 18, batch 1000, train_loss[loss=2.705, ArTop10Accuracy=0.7892, over 13044.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7844, over 11875.32 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,503 INFO [trainer.py:765] (3/8) Epoch 18, batch 1100, train_loss[loss=2.799, ArTop10Accuracy=0.7727, over 13740.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7828, over 11949.67 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,679 INFO [trainer.py:765] (3/8) Epoch 18, batch 1200, train_loss[loss=2.883, ArTop10Accuracy=0.7567, over 12735.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7818, over 11873.72 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,070 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,614 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:37:48,631 INFO [trainer.py:765] (3/8) Epoch 19, batch 100, train_loss[loss=2.814, ArTop10Accuracy=0.7616, over 14505.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.786, over 4772.08 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,263 INFO [trainer.py:765] (3/8) Epoch 19, batch 200, train_loss[loss=2.737, ArTop10Accuracy=0.7761, over 13692.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.786, over 7752.10 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,365 INFO [trainer.py:765] (3/8) Epoch 19, batch 300, train_loss[loss=2.774, ArTop10Accuracy=0.7773, over 13896.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7867, over 9365.27 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 13:42:21,074 INFO [trainer.py:765] (3/8) Epoch 19, batch 400, train_loss[loss=2.689, ArTop10Accuracy=0.7877, over 10443.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7878, over 10266.11 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,962 INFO [trainer.py:765] (3/8) Epoch 19, batch 500, train_loss[loss=2.773, ArTop10Accuracy=0.7813, over 12192.00 frames. ], tot_loss[loss=2.707, ArTop10Accuracy=0.7875, over 10846.68 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,688 INFO [trainer.py:765] (3/8) Epoch 19, batch 600, train_loss[loss=2.611, ArTop10Accuracy=0.8074, over 11343.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7871, over 11365.54 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,330 INFO [trainer.py:765] (3/8) Epoch 19, batch 700, train_loss[loss=2.617, ArTop10Accuracy=0.8064, over 9999.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7864, over 11505.08 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,890 INFO [trainer.py:765] (3/8) Epoch 19, batch 800, train_loss[loss=2.594, ArTop10Accuracy=0.8097, over 9999.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7854, over 11627.45 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,265 INFO [trainer.py:765] (3/8) Epoch 19, batch 900, train_loss[loss=2.669, ArTop10Accuracy=0.7927, over 13359.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7863, over 11681.49 frames. ], batch size: 28, lr: 6.32e-03 +2024-08-06 13:50:40,660 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:50:50,537 INFO [trainer.py:811] (3/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,537 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33011MB +2024-08-06 13:50:51,496 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,922 INFO [trainer.py:765] (3/8) Epoch 19, batch 1000, train_loss[loss=2.747, ArTop10Accuracy=0.782, over 12804.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7851, over 11885.30 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,271 INFO [trainer.py:765] (3/8) Epoch 19, batch 1100, train_loss[loss=2.752, ArTop10Accuracy=0.7856, over 13539.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7839, over 11950.86 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,318 INFO [trainer.py:765] (3/8) Epoch 19, batch 1200, train_loss[loss=2.822, ArTop10Accuracy=0.7666, over 12456.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 11872.46 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:21,909 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:56:12,911 INFO [trainer.py:765] (3/8) Epoch 20, batch 100, train_loss[loss=2.75, ArTop10Accuracy=0.7818, over 14421.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7869, over 4732.71 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,502 INFO [trainer.py:765] (3/8) Epoch 20, batch 200, train_loss[loss=2.767, ArTop10Accuracy=0.7772, over 13650.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.788, over 7747.52 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,437 INFO [trainer.py:765] (3/8) Epoch 20, batch 300, train_loss[loss=2.739, ArTop10Accuracy=0.7793, over 14226.00 frames. ], tot_loss[loss=2.697, ArTop10Accuracy=0.7894, over 9362.38 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,363 INFO [trainer.py:765] (3/8) Epoch 20, batch 400, train_loss[loss=2.645, ArTop10Accuracy=0.7971, over 10269.00 frames. ], tot_loss[loss=2.695, ArTop10Accuracy=0.7897, over 10275.36 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,860 INFO [trainer.py:765] (3/8) Epoch 20, batch 500, train_loss[loss=2.717, ArTop10Accuracy=0.786, over 12165.00 frames. ], tot_loss[loss=2.693, ArTop10Accuracy=0.7903, over 10821.64 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,861 INFO [trainer.py:765] (3/8) Epoch 20, batch 600, train_loss[loss=2.75, ArTop10Accuracy=0.78, over 11436.00 frames. ], tot_loss[loss=2.695, ArTop10Accuracy=0.7902, over 11359.02 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,871 INFO [trainer.py:765] (3/8) Epoch 20, batch 700, train_loss[loss=2.699, ArTop10Accuracy=0.7882, over 9402.00 frames. ], tot_loss[loss=2.702, ArTop10Accuracy=0.7887, over 11513.76 frames. ], batch size: 11, lr: 6.03e-03 +2024-08-06 14:05:30,797 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,515 INFO [trainer.py:765] (3/8) Epoch 20, batch 800, train_loss[loss=2.575, ArTop10Accuracy=0.8192, over 10212.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7877, over 11662.80 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 14:07:50,950 INFO [trainer.py:765] (3/8) Epoch 20, batch 900, train_loss[loss=2.687, ArTop10Accuracy=0.7923, over 12840.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7885, over 11704.16 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,179 INFO [trainer.py:765] (3/8) Epoch 20, batch 1000, train_loss[loss=2.767, ArTop10Accuracy=0.776, over 13011.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7881, over 11884.06 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,215 INFO [trainer.py:765] (3/8) Epoch 20, batch 1100, train_loss[loss=2.713, ArTop10Accuracy=0.7884, over 13860.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7867, over 11958.77 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,819 INFO [trainer.py:765] (3/8) Epoch 20, batch 1200, train_loss[loss=2.817, ArTop10Accuracy=0.7719, over 11877.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7866, over 11870.45 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:36,807 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 14:12:36,810 INFO [trainer.py:1069] (3/8) Done! 
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-4 b/libritts-r/log/log-train-2024-08-06-08-06-14-4 new file mode 100644 index 0000000000000000000000000000000000000000..c1e300bf6aeb895684a231638ad8ffb2e87c000b --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-4 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,318 INFO [trainer.py:870] (4/8) Training started +2024-08-06 08:06:14,319 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 08:06:14,319 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,319 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 08:06:15,010 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,222 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 08:06:19,151 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 08:06:19,153 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 08:06:19,155 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 08:06:19,155 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 08:06:19,155 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,763 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 08:06:19,763 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 08:06:20,087 INFO [datamodule.py:388] (4/8) About to create dev dataloader +2024-08-06 
08:08:02,120 INFO [trainer.py:765] (4/8) Epoch 1, batch 100, train_loss[loss=4.335, ArTop10Accuracy=0.4992, over 14349.00 frames. ], tot_loss[loss=5.058, ArTop10Accuracy=0.3727, over 4771.89 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,827 INFO [trainer.py:765] (4/8) Epoch 1, batch 200, train_loss[loss=4.111, ArTop10Accuracy=0.5308, over 13680.00 frames. ], tot_loss[loss=4.496, ArTop10Accuracy=0.4669, over 7746.35 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,430 INFO [trainer.py:765] (4/8) Epoch 1, batch 300, train_loss[loss=3.881, ArTop10Accuracy=0.5686, over 14085.00 frames. ], tot_loss[loss=4.218, ArTop10Accuracy=0.5129, over 9372.51 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,698 INFO [trainer.py:765] (4/8) Epoch 1, batch 400, train_loss[loss=3.738, ArTop10Accuracy=0.6, over 10314.00 frames. ], tot_loss[loss=4.027, ArTop10Accuracy=0.5454, over 10297.26 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 08:13:40,049 INFO [trainer.py:765] (4/8) Epoch 1, batch 500, train_loss[loss=3.696, ArTop10Accuracy=0.6047, over 12063.00 frames. ], tot_loss[loss=3.883, ArTop10Accuracy=0.5703, over 10855.34 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,242 INFO [trainer.py:765] (4/8) Epoch 1, batch 600, train_loss[loss=3.559, ArTop10Accuracy=0.6271, over 11481.00 frames. ], tot_loss[loss=3.773, ArTop10Accuracy=0.5898, over 11365.23 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,424 INFO [trainer.py:765] (4/8) Epoch 1, batch 700, train_loss[loss=3.57, ArTop10Accuracy=0.6244, over 10320.00 frames. ], tot_loss[loss=3.695, ArTop10Accuracy=0.6037, over 11513.99 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,017 INFO [trainer.py:765] (4/8) Epoch 1, batch 800, train_loss[loss=3.429, ArTop10Accuracy=0.6523, over 9978.00 frames. ], tot_loss[loss=3.627, ArTop10Accuracy=0.6163, over 11645.81 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 08:18:56,150 INFO [trainer.py:765] (4/8) Epoch 1, batch 900, train_loss[loss=3.458, ArTop10Accuracy=0.6464, over 12951.00 frames. ], tot_loss[loss=3.567, ArTop10Accuracy=0.6273, over 11687.10 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,862 INFO [trainer.py:765] (4/8) Epoch 1, batch 1000, train_loss[loss=3.476, ArTop10Accuracy=0.6408, over 13488.00 frames. ], tot_loss[loss=3.524, ArTop10Accuracy=0.635, over 11889.35 frames. ], batch size: 28, lr: 2.97e-02 +2024-08-06 08:20:13,539 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,154 INFO [trainer.py:765] (4/8) Epoch 1, batch 1100, train_loss[loss=3.469, ArTop10Accuracy=0.6412, over 13692.00 frames. ], tot_loss[loss=3.487, ArTop10Accuracy=0.6416, over 11959.37 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 08:22:45,410 INFO [trainer.py:765] (4/8) Epoch 1, batch 1200, train_loss[loss=3.468, ArTop10Accuracy=0.6428, over 11691.00 frames. ], tot_loss[loss=3.456, ArTop10Accuracy=0.6475, over 11856.20 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,262 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:25:36,236 INFO [trainer.py:765] (4/8) Epoch 2, batch 100, train_loss[loss=3.453, ArTop10Accuracy=0.6483, over 14559.00 frames. ], tot_loss[loss=3.419, ArTop10Accuracy=0.6533, over 4753.85 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,955 INFO [trainer.py:765] (4/8) Epoch 2, batch 200, train_loss[loss=3.27, ArTop10Accuracy=0.6853, over 13752.00 frames. ], tot_loss[loss=3.384, ArTop10Accuracy=0.6604, over 7757.10 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,533 INFO [trainer.py:765] (4/8) Epoch 2, batch 300, train_loss[loss=3.402, ArTop10Accuracy=0.6578, over 14046.00 frames. ], tot_loss[loss=3.371, ArTop10Accuracy=0.6631, over 9382.03 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,636 INFO [trainer.py:765] (4/8) Epoch 2, batch 400, train_loss[loss=3.355, ArTop10Accuracy=0.6619, over 10944.00 frames. ], tot_loss[loss=3.358, ArTop10Accuracy=0.6657, over 10312.86 frames. ], batch size: 15, lr: 2.88e-02 +2024-08-06 08:31:22,902 INFO [trainer.py:765] (4/8) Epoch 2, batch 500, train_loss[loss=3.212, ArTop10Accuracy=0.6956, over 12171.00 frames. ], tot_loss[loss=3.339, ArTop10Accuracy=0.6692, over 10869.97 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 08:32:45,688 INFO [trainer.py:765] (4/8) Epoch 2, batch 600, train_loss[loss=3.308, ArTop10Accuracy=0.6743, over 11418.00 frames. ], tot_loss[loss=3.329, ArTop10Accuracy=0.671, over 11384.57 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,582 INFO [trainer.py:765] (4/8) Epoch 2, batch 700, train_loss[loss=3.313, ArTop10Accuracy=0.6793, over 9951.00 frames. ], tot_loss[loss=3.325, ArTop10Accuracy=0.6719, over 11534.00 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 08:34:31,175 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:34:40,888 INFO [trainer.py:811] (4/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,889 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28695MB +2024-08-06 08:34:41,699 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,878 INFO [trainer.py:765] (4/8) Epoch 2, batch 800, train_loss[loss=3.2, ArTop10Accuracy=0.6972, over 9570.00 frames. ], tot_loss[loss=3.318, ArTop10Accuracy=0.6735, over 11652.56 frames. ], batch size: 11, lr: 2.84e-02 +2024-08-06 08:36:56,371 INFO [trainer.py:765] (4/8) Epoch 2, batch 900, train_loss[loss=3.262, ArTop10Accuracy=0.6776, over 12861.00 frames. ], tot_loss[loss=3.305, ArTop10Accuracy=0.6758, over 11691.96 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,511 INFO [trainer.py:765] (4/8) Epoch 2, batch 1000, train_loss[loss=3.307, ArTop10Accuracy=0.6773, over 13053.00 frames. ], tot_loss[loss=3.299, ArTop10Accuracy=0.677, over 11892.14 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,059 INFO [trainer.py:765] (4/8) Epoch 2, batch 1100, train_loss[loss=3.159, ArTop10Accuracy=0.7048, over 13839.00 frames. ], tot_loss[loss=3.292, ArTop10Accuracy=0.6781, over 11930.52 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,219 INFO [trainer.py:765] (4/8) Epoch 2, batch 1200, train_loss[loss=3.333, ArTop10Accuracy=0.6674, over 13452.00 frames. ], tot_loss[loss=3.283, ArTop10Accuracy=0.6799, over 11836.01 frames. ], batch size: 101, lr: 2.80e-02 +2024-08-06 08:41:38,601 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:43:36,649 INFO [trainer.py:765] (4/8) Epoch 3, batch 100, train_loss[loss=3.256, ArTop10Accuracy=0.6832, over 14394.00 frames. ], tot_loss[loss=3.244, ArTop10Accuracy=0.6866, over 4768.62 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,500 INFO [trainer.py:765] (4/8) Epoch 3, batch 200, train_loss[loss=3.201, ArTop10Accuracy=0.695, over 13674.00 frames. ], tot_loss[loss=3.221, ArTop10Accuracy=0.6908, over 7764.36 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,258 INFO [trainer.py:765] (4/8) Epoch 3, batch 300, train_loss[loss=3.233, ArTop10Accuracy=0.6863, over 14310.00 frames. ], tot_loss[loss=3.207, ArTop10Accuracy=0.6938, over 9365.66 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,219 INFO [trainer.py:765] (4/8) Epoch 3, batch 400, train_loss[loss=3.129, ArTop10Accuracy=0.7089, over 10473.00 frames. ], tot_loss[loss=3.192, ArTop10Accuracy=0.6969, over 10282.72 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 08:48:40,881 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,541 INFO [trainer.py:765] (4/8) Epoch 3, batch 500, train_loss[loss=3.17, ArTop10Accuracy=0.7073, over 12351.00 frames. ], tot_loss[loss=3.172, ArTop10Accuracy=0.7009, over 10852.39 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,477 INFO [trainer.py:765] (4/8) Epoch 3, batch 600, train_loss[loss=3.056, ArTop10Accuracy=0.7223, over 11325.00 frames. ], tot_loss[loss=3.16, ArTop10Accuracy=0.7028, over 11381.83 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,618 INFO [trainer.py:765] (4/8) Epoch 3, batch 700, train_loss[loss=3.058, ArTop10Accuracy=0.7241, over 10176.00 frames. ], tot_loss[loss=3.143, ArTop10Accuracy=0.7062, over 11521.06 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 08:53:57,389 INFO [trainer.py:765] (4/8) Epoch 3, batch 800, train_loss[loss=3.078, ArTop10Accuracy=0.7212, over 9276.00 frames. ], tot_loss[loss=3.137, ArTop10Accuracy=0.7072, over 11622.44 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 08:55:15,119 INFO [trainer.py:765] (4/8) Epoch 3, batch 900, train_loss[loss=3.061, ArTop10Accuracy=0.7179, over 13047.00 frames. ], tot_loss[loss=3.123, ArTop10Accuracy=0.7099, over 11666.34 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,557 INFO [trainer.py:765] (4/8) Epoch 3, batch 1000, train_loss[loss=3.183, ArTop10Accuracy=0.6972, over 12882.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.7119, over 11867.25 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,506 INFO [trainer.py:765] (4/8) Epoch 3, batch 1100, train_loss[loss=2.998, ArTop10Accuracy=0.7333, over 13554.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.7132, over 11926.75 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,399 INFO [trainer.py:765] (4/8) Epoch 3, batch 1200, train_loss[loss=3.151, ArTop10Accuracy=0.7024, over 13326.00 frames. ], tot_loss[loss=3.097, ArTop10Accuracy=0.7145, over 11854.55 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:01,980 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:01:50,742 INFO [trainer.py:765] (4/8) Epoch 4, batch 100, train_loss[loss=3.127, ArTop10Accuracy=0.7081, over 14670.00 frames. ], tot_loss[loss=3.065, ArTop10Accuracy=0.7201, over 4761.72 frames. ], batch size: 64, lr: 2.38e-02 +2024-08-06 09:02:52,859 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,385 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29513MB +2024-08-06 09:03:03,364 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,273 INFO [trainer.py:765] (4/8) Epoch 4, batch 200, train_loss[loss=3.069, ArTop10Accuracy=0.718, over 13527.00 frames. ], tot_loss[loss=3.041, ArTop10Accuracy=0.7249, over 7754.25 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,733 INFO [trainer.py:765] (4/8) Epoch 4, batch 300, train_loss[loss=3.133, ArTop10Accuracy=0.7066, over 14562.00 frames. ], tot_loss[loss=3.038, ArTop10Accuracy=0.7259, over 9353.08 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 09:06:28,151 INFO [trainer.py:765] (4/8) Epoch 4, batch 400, train_loss[loss=2.94, ArTop10Accuracy=0.7486, over 10116.00 frames. ], tot_loss[loss=3.034, ArTop10Accuracy=0.7265, over 10275.79 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 09:08:01,925 INFO [trainer.py:765] (4/8) Epoch 4, batch 500, train_loss[loss=3.045, ArTop10Accuracy=0.7286, over 12501.00 frames. ], tot_loss[loss=3.029, ArTop10Accuracy=0.7272, over 10828.04 frames. ], batch size: 23, lr: 2.33e-02 +2024-08-06 09:09:28,540 INFO [trainer.py:765] (4/8) Epoch 4, batch 600, train_loss[loss=2.955, ArTop10Accuracy=0.7423, over 11589.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7284, over 11374.57 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,865 INFO [trainer.py:765] (4/8) Epoch 4, batch 700, train_loss[loss=3.009, ArTop10Accuracy=0.7394, over 10125.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7277, over 11515.59 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,513 INFO [trainer.py:765] (4/8) Epoch 4, batch 800, train_loss[loss=2.969, ArTop10Accuracy=0.7425, over 9366.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.7288, over 11640.09 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 09:13:33,212 INFO [trainer.py:765] (4/8) Epoch 4, batch 900, train_loss[loss=2.992, ArTop10Accuracy=0.7306, over 12924.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7308, over 11686.06 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,520 INFO [trainer.py:765] (4/8) Epoch 4, batch 1000, train_loss[loss=2.935, ArTop10Accuracy=0.7421, over 12690.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7308, over 11873.84 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,982 INFO [trainer.py:765] (4/8) Epoch 4, batch 1100, train_loss[loss=2.965, ArTop10Accuracy=0.741, over 13602.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7308, over 11940.21 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,291 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,344 INFO [trainer.py:765] (4/8) Epoch 4, batch 1200, train_loss[loss=3.053, ArTop10Accuracy=0.7237, over 12633.00 frames. ], tot_loss[loss=3.01, ArTop10Accuracy=0.7309, over 11854.12 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,349 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:20:17,170 INFO [trainer.py:765] (4/8) Epoch 5, batch 100, train_loss[loss=3.024, ArTop10Accuracy=0.725, over 14337.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7326, over 4765.65 frames. 
], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,295 INFO [trainer.py:765] (4/8) Epoch 5, batch 200, train_loss[loss=2.968, ArTop10Accuracy=0.7412, over 13647.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7375, over 7764.93 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,240 INFO [trainer.py:765] (4/8) Epoch 5, batch 300, train_loss[loss=3.021, ArTop10Accuracy=0.7283, over 14367.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7392, over 9381.01 frames. ], batch size: 45, lr: 2.08e-02 +2024-08-06 09:24:53,536 INFO [trainer.py:765] (4/8) Epoch 5, batch 400, train_loss[loss=2.943, ArTop10Accuracy=0.7394, over 10296.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.74, over 10297.28 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,417 INFO [trainer.py:765] (4/8) Epoch 5, batch 500, train_loss[loss=2.9, ArTop10Accuracy=0.7498, over 12066.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7404, over 10856.08 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,536 INFO [trainer.py:765] (4/8) Epoch 5, batch 600, train_loss[loss=3.014, ArTop10Accuracy=0.7307, over 11511.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7401, over 11385.83 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,669 INFO [trainer.py:765] (4/8) Epoch 5, batch 700, train_loss[loss=2.976, ArTop10Accuracy=0.733, over 9171.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7393, over 11512.86 frames. ], batch size: 11, lr: 2.04e-02 +2024-08-06 09:30:44,692 INFO [trainer.py:765] (4/8) Epoch 5, batch 800, train_loss[loss=2.901, ArTop10Accuracy=0.7489, over 10170.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7385, over 11642.97 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 09:31:51,238 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:32:00,762 INFO [trainer.py:811] (4/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,763 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 09:32:01,708 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,553 INFO [trainer.py:765] (4/8) Epoch 5, batch 900, train_loss[loss=2.998, ArTop10Accuracy=0.7307, over 12870.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7404, over 11683.19 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,322 INFO [trainer.py:765] (4/8) Epoch 5, batch 1000, train_loss[loss=2.89, ArTop10Accuracy=0.7553, over 12822.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7399, over 11878.40 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,299 INFO [trainer.py:765] (4/8) Epoch 5, batch 1100, train_loss[loss=2.96, ArTop10Accuracy=0.7399, over 13461.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7402, over 11949.48 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,331 INFO [trainer.py:765] (4/8) Epoch 5, batch 1200, train_loss[loss=3.116, ArTop10Accuracy=0.7083, over 11610.00 frames. ], tot_loss[loss=2.957, ArTop10Accuracy=0.7409, over 11851.67 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,326 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:38:52,664 INFO [trainer.py:765] (4/8) Epoch 6, batch 100, train_loss[loss=3.011, ArTop10Accuracy=0.7297, over 14652.00 frames. ], tot_loss[loss=2.953, ArTop10Accuracy=0.7414, over 4763.22 frames. 
], batch size: 63, lr: 1.85e-02 +2024-08-06 09:40:19,833 INFO [trainer.py:765] (4/8) Epoch 6, batch 200, train_loss[loss=2.924, ArTop10Accuracy=0.749, over 13821.00 frames. ], tot_loss[loss=2.935, ArTop10Accuracy=0.7448, over 7747.45 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,964 INFO [trainer.py:765] (4/8) Epoch 6, batch 300, train_loss[loss=2.895, ArTop10Accuracy=0.7482, over 14133.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.7456, over 9381.03 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 09:43:17,827 INFO [trainer.py:765] (4/8) Epoch 6, batch 400, train_loss[loss=2.934, ArTop10Accuracy=0.7428, over 10410.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7465, over 10294.73 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,127 INFO [trainer.py:765] (4/8) Epoch 6, batch 500, train_loss[loss=2.914, ArTop10Accuracy=0.7514, over 12327.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7488, over 10858.88 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,872 INFO [trainer.py:765] (4/8) Epoch 6, batch 600, train_loss[loss=2.957, ArTop10Accuracy=0.7467, over 11445.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7477, over 11366.12 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,219 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,869 INFO [trainer.py:765] (4/8) Epoch 6, batch 700, train_loss[loss=2.881, ArTop10Accuracy=0.7528, over 10191.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7467, over 11534.85 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,954 INFO [trainer.py:765] (4/8) Epoch 6, batch 800, train_loss[loss=2.915, ArTop10Accuracy=0.7503, over 9345.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7465, over 11644.71 frames. ], batch size: 11, lr: 1.79e-02 +2024-08-06 09:50:32,134 INFO [trainer.py:765] (4/8) Epoch 6, batch 900, train_loss[loss=2.904, ArTop10Accuracy=0.7514, over 13062.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7472, over 11680.49 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,298 INFO [trainer.py:765] (4/8) Epoch 6, batch 1000, train_loss[loss=2.913, ArTop10Accuracy=0.752, over 12957.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.747, over 11873.10 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,920 INFO [trainer.py:765] (4/8) Epoch 6, batch 1100, train_loss[loss=2.91, ArTop10Accuracy=0.7514, over 13686.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.7458, over 11944.91 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,336 INFO [trainer.py:765] (4/8) Epoch 6, batch 1200, train_loss[loss=3.056, ArTop10Accuracy=0.7208, over 12609.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7461, over 11868.14 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,263 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:57:06,699 INFO [trainer.py:765] (4/8) Epoch 7, batch 100, train_loss[loss=2.98, ArTop10Accuracy=0.7353, over 14310.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.748, over 4748.26 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 09:58:39,426 INFO [trainer.py:765] (4/8) Epoch 7, batch 200, train_loss[loss=2.916, ArTop10Accuracy=0.7468, over 13752.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7504, over 7746.19 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,083 INFO [trainer.py:765] (4/8) Epoch 7, batch 300, train_loss[loss=2.979, ArTop10Accuracy=0.7354, over 13800.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7517, over 9374.86 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,509 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 10:00:50,977 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,118 INFO [trainer.py:765] (4/8) Epoch 7, batch 400, train_loss[loss=2.852, ArTop10Accuracy=0.765, over 10215.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7524, over 10288.13 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 10:03:21,457 INFO [trainer.py:765] (4/8) Epoch 7, batch 500, train_loss[loss=2.888, ArTop10Accuracy=0.7599, over 12327.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7534, over 10853.35 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,882 INFO [trainer.py:765] (4/8) Epoch 7, batch 600, train_loss[loss=2.909, ArTop10Accuracy=0.7526, over 11847.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7529, over 11343.07 frames. ], batch size: 19, lr: 1.61e-02 +2024-08-06 10:06:25,112 INFO [trainer.py:765] (4/8) Epoch 7, batch 700, train_loss[loss=2.939, ArTop10Accuracy=0.7475, over 9381.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7526, over 11502.86 frames. ], batch size: 11, lr: 1.60e-02 +2024-08-06 10:07:46,948 INFO [trainer.py:765] (4/8) Epoch 7, batch 800, train_loss[loss=2.904, ArTop10Accuracy=0.7507, over 10071.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7514, over 11636.93 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,824 INFO [trainer.py:765] (4/8) Epoch 7, batch 900, train_loss[loss=2.835, ArTop10Accuracy=0.7591, over 12993.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.753, over 11682.69 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,636 INFO [trainer.py:765] (4/8) Epoch 7, batch 1000, train_loss[loss=2.856, ArTop10Accuracy=0.7663, over 12762.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7523, over 11896.36 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,208 INFO [trainer.py:765] (4/8) Epoch 7, batch 1100, train_loss[loss=2.936, ArTop10Accuracy=0.7445, over 13755.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7512, over 11966.74 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,204 INFO [trainer.py:765] (4/8) Epoch 7, batch 1200, train_loss[loss=3.002, ArTop10Accuracy=0.7326, over 12930.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7519, over 11872.20 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,750 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:15:03,600 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,820 INFO [trainer.py:765] (4/8) Epoch 8, batch 100, train_loss[loss=3.008, ArTop10Accuracy=0.7324, over 14205.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.754, over 4763.79 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,861 INFO [trainer.py:765] (4/8) Epoch 8, batch 200, train_loss[loss=2.874, ArTop10Accuracy=0.7629, over 13785.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.757, over 7763.58 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,897 INFO [trainer.py:765] (4/8) Epoch 8, batch 300, train_loss[loss=2.891, ArTop10Accuracy=0.7523, over 14205.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7584, over 9375.13 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,341 INFO [trainer.py:765] (4/8) Epoch 8, batch 400, train_loss[loss=2.892, ArTop10Accuracy=0.751, over 10953.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7583, over 10289.35 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 10:21:32,410 INFO [trainer.py:765] (4/8) Epoch 8, batch 500, train_loss[loss=2.888, ArTop10Accuracy=0.7557, over 12642.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7591, over 10849.37 frames. ], batch size: 23, lr: 1.45e-02 +2024-08-06 10:23:00,973 INFO [trainer.py:765] (4/8) Epoch 8, batch 600, train_loss[loss=2.915, ArTop10Accuracy=0.7512, over 11388.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7587, over 11353.14 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,787 INFO [trainer.py:765] (4/8) Epoch 8, batch 700, train_loss[loss=2.855, ArTop10Accuracy=0.7624, over 10257.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.7579, over 11516.65 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:25:56,086 INFO [trainer.py:765] (4/8) Epoch 8, batch 800, train_loss[loss=2.831, ArTop10Accuracy=0.7627, over 10239.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7568, over 11657.44 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:27:12,244 INFO [trainer.py:765] (4/8) Epoch 8, batch 900, train_loss[loss=2.981, ArTop10Accuracy=0.7387, over 13305.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7582, over 11699.99 frames. ], batch size: 28, lr: 1.42e-02 +2024-08-06 10:28:25,262 INFO [trainer.py:765] (4/8) Epoch 8, batch 1000, train_loss[loss=2.895, ArTop10Accuracy=0.7544, over 13005.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7573, over 11892.93 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,154 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:29:16,831 INFO [trainer.py:811] (4/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 10:29:17,490 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,731 INFO [trainer.py:765] (4/8) Epoch 8, batch 1100, train_loss[loss=2.842, ArTop10Accuracy=0.762, over 13689.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7568, over 11939.37 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,947 INFO [trainer.py:765] (4/8) Epoch 8, batch 1200, train_loss[loss=2.955, ArTop10Accuracy=0.743, over 12402.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7565, over 11857.03 frames. ], batch size: 101, lr: 1.40e-02 +2024-08-06 10:32:05,791 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:34:01,255 INFO [trainer.py:765] (4/8) Epoch 9, batch 100, train_loss[loss=2.904, ArTop10Accuracy=0.7568, over 14307.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7586, over 4737.05 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,771 INFO [trainer.py:765] (4/8) Epoch 9, batch 200, train_loss[loss=2.822, ArTop10Accuracy=0.7641, over 13818.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.76, over 7743.59 frames. ], batch size: 35, lr: 1.32e-02 +2024-08-06 10:36:57,926 INFO [trainer.py:765] (4/8) Epoch 9, batch 300, train_loss[loss=2.909, ArTop10Accuracy=0.7482, over 13983.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7611, over 9372.82 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,696 INFO [trainer.py:765] (4/8) Epoch 9, batch 400, train_loss[loss=2.76, ArTop10Accuracy=0.7829, over 10272.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7612, over 10289.62 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,255 INFO [trainer.py:765] (4/8) Epoch 9, batch 500, train_loss[loss=2.807, ArTop10Accuracy=0.7688, over 12525.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7624, over 10855.66 frames. ], batch size: 23, lr: 1.30e-02 +2024-08-06 10:41:29,689 INFO [trainer.py:765] (4/8) Epoch 9, batch 600, train_loss[loss=2.761, ArTop10Accuracy=0.7807, over 11481.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7619, over 11380.55 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,439 INFO [trainer.py:765] (4/8) Epoch 9, batch 700, train_loss[loss=2.828, ArTop10Accuracy=0.7617, over 10086.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7613, over 11524.13 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:44:02,952 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,668 INFO [trainer.py:765] (4/8) Epoch 9, batch 800, train_loss[loss=2.768, ArTop10Accuracy=0.7773, over 10254.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7609, over 11649.42 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:45:35,718 INFO [trainer.py:765] (4/8) Epoch 9, batch 900, train_loss[loss=2.88, ArTop10Accuracy=0.753, over 13434.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7618, over 11670.68 frames. ], batch size: 28, lr: 1.28e-02 +2024-08-06 10:46:51,270 INFO [trainer.py:765] (4/8) Epoch 9, batch 1000, train_loss[loss=2.869, ArTop10Accuracy=0.7541, over 12966.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.761, over 11876.51 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,246 INFO [trainer.py:765] (4/8) Epoch 9, batch 1100, train_loss[loss=2.955, ArTop10Accuracy=0.7404, over 13590.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7598, over 11951.07 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,052 INFO [trainer.py:765] (4/8) Epoch 9, batch 1200, train_loss[loss=2.974, ArTop10Accuracy=0.7371, over 12891.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7605, over 11860.30 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:22,648 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:52:12,326 INFO [trainer.py:765] (4/8) Epoch 10, batch 100, train_loss[loss=2.903, ArTop10Accuracy=0.7494, over 14361.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7629, over 4760.61 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,585 INFO [trainer.py:765] (4/8) Epoch 10, batch 200, train_loss[loss=2.808, ArTop10Accuracy=0.7728, over 13860.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7645, over 7751.90 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,089 INFO [trainer.py:765] (4/8) Epoch 10, batch 300, train_loss[loss=2.899, ArTop10Accuracy=0.7537, over 14238.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.765, over 9382.80 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,176 INFO [trainer.py:765] (4/8) Epoch 10, batch 400, train_loss[loss=2.607, ArTop10Accuracy=0.8052, over 10920.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7657, over 10285.01 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 10:58:04,937 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:58:14,559 INFO [trainer.py:811] (4/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,560 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 10:58:15,573 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,577 INFO [trainer.py:765] (4/8) Epoch 10, batch 500, train_loss[loss=2.741, ArTop10Accuracy=0.7833, over 12168.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7663, over 10832.02 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,814 INFO [trainer.py:765] (4/8) Epoch 10, batch 600, train_loss[loss=2.833, ArTop10Accuracy=0.7675, over 11478.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7663, over 11348.40 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,107 INFO [trainer.py:765] (4/8) Epoch 10, batch 700, train_loss[loss=2.833, ArTop10Accuracy=0.77, over 10155.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7646, over 11499.12 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 11:02:36,917 INFO [trainer.py:765] (4/8) Epoch 10, batch 800, train_loss[loss=2.736, ArTop10Accuracy=0.7802, over 9588.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.764, over 11598.00 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 11:03:51,211 INFO [trainer.py:765] (4/8) Epoch 10, batch 900, train_loss[loss=2.81, ArTop10Accuracy=0.7674, over 12879.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7651, over 11668.20 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,351 INFO [trainer.py:765] (4/8) Epoch 10, batch 1000, train_loss[loss=2.774, ArTop10Accuracy=0.7766, over 13230.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7643, over 11870.10 frames. ], batch size: 28, lr: 1.17e-02 +2024-08-06 11:06:21,722 INFO [trainer.py:765] (4/8) Epoch 10, batch 1100, train_loss[loss=2.834, ArTop10Accuracy=0.7654, over 14001.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7635, over 11949.67 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,771 INFO [trainer.py:765] (4/8) Epoch 10, batch 1200, train_loss[loss=2.926, ArTop10Accuracy=0.7422, over 12183.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7631, over 11860.35 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,545 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:10:29,955 INFO [trainer.py:765] (4/8) Epoch 11, batch 100, train_loss[loss=2.894, ArTop10Accuracy=0.7514, over 14163.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7656, over 4760.24 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,675 INFO [trainer.py:765] (4/8) Epoch 11, batch 200, train_loss[loss=2.819, ArTop10Accuracy=0.7616, over 13581.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7671, over 7747.57 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 11:12:22,826 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,548 INFO [trainer.py:765] (4/8) Epoch 11, batch 300, train_loss[loss=2.827, ArTop10Accuracy=0.7685, over 14136.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 9352.68 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,269 INFO [trainer.py:765] (4/8) Epoch 11, batch 400, train_loss[loss=2.65, ArTop10Accuracy=0.7958, over 10311.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 10269.57 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,637 INFO [trainer.py:765] (4/8) Epoch 11, batch 500, train_loss[loss=2.803, ArTop10Accuracy=0.7683, over 12186.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7709, over 10871.11 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,517 INFO [trainer.py:765] (4/8) Epoch 11, batch 600, train_loss[loss=2.703, ArTop10Accuracy=0.7925, over 11367.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7702, over 11379.50 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,514 INFO [trainer.py:765] (4/8) Epoch 11, batch 700, train_loss[loss=2.706, ArTop10Accuracy=0.7954, over 10206.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7696, over 11531.85 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 11:20:55,484 INFO [trainer.py:765] (4/8) Epoch 11, batch 800, train_loss[loss=2.785, ArTop10Accuracy=0.7746, over 10131.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7683, over 11652.45 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,705 INFO [trainer.py:765] (4/8) Epoch 11, batch 900, train_loss[loss=2.852, ArTop10Accuracy=0.7595, over 12939.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7692, over 11699.34 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,799 INFO [trainer.py:765] (4/8) Epoch 11, batch 1000, train_loss[loss=2.784, ArTop10Accuracy=0.776, over 12765.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7685, over 11909.07 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,902 INFO [trainer.py:765] (4/8) Epoch 11, batch 1100, train_loss[loss=2.783, ArTop10Accuracy=0.7739, over 13785.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7666, over 11994.73 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,733 INFO [trainer.py:765] (4/8) Epoch 11, batch 1200, train_loss[loss=2.906, ArTop10Accuracy=0.7499, over 12528.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7665, over 11900.31 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,847 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 11:26:26,185 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,520 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:29:03,450 INFO [trainer.py:765] (4/8) Epoch 12, batch 100, train_loss[loss=2.851, ArTop10Accuracy=0.7621, over 14574.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7693, over 4761.92 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,674 INFO [trainer.py:765] (4/8) Epoch 12, batch 200, train_loss[loss=2.84, ArTop10Accuracy=0.7634, over 13653.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7697, over 7757.17 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,655 INFO [trainer.py:765] (4/8) Epoch 12, batch 300, train_loss[loss=2.84, ArTop10Accuracy=0.7657, over 14268.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7713, over 9378.27 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 11:33:30,739 INFO [trainer.py:765] (4/8) Epoch 12, batch 400, train_loss[loss=2.648, ArTop10Accuracy=0.7979, over 10299.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7716, over 10283.00 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,733 INFO [trainer.py:765] (4/8) Epoch 12, batch 500, train_loss[loss=2.764, ArTop10Accuracy=0.7742, over 12129.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7722, over 10856.75 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,363 INFO [trainer.py:765] (4/8) Epoch 12, batch 600, train_loss[loss=2.737, ArTop10Accuracy=0.7859, over 11379.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7718, over 11376.60 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,343 INFO [trainer.py:765] (4/8) Epoch 12, batch 700, train_loss[loss=2.838, ArTop10Accuracy=0.7632, over 10191.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.771, over 11525.72 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,610 INFO [trainer.py:765] (4/8) Epoch 12, batch 800, train_loss[loss=2.668, ArTop10Accuracy=0.7935, over 10128.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7706, over 11638.44 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,889 INFO [trainer.py:765] (4/8) Epoch 12, batch 900, train_loss[loss=2.768, ArTop10Accuracy=0.7778, over 12996.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7715, over 11692.40 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:13,995 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,188 INFO [trainer.py:765] (4/8) Epoch 12, batch 1000, train_loss[loss=2.773, ArTop10Accuracy=0.7733, over 12681.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7706, over 11884.29 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,321 INFO [trainer.py:765] (4/8) Epoch 12, batch 1100, train_loss[loss=2.818, ArTop10Accuracy=0.7713, over 13422.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7694, over 11954.58 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,156 INFO [trainer.py:765] (4/8) Epoch 12, batch 1200, train_loss[loss=2.938, ArTop10Accuracy=0.7448, over 12807.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 11862.54 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,431 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:47:26,599 INFO [trainer.py:765] (4/8) Epoch 13, batch 100, train_loss[loss=2.825, ArTop10Accuracy=0.766, over 14238.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7713, over 4764.73 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,778 INFO [trainer.py:765] (4/8) Epoch 13, batch 200, train_loss[loss=2.834, ArTop10Accuracy=0.7646, over 13965.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.773, over 7763.78 frames. 
], batch size: 35, lr: 9.34e-03 +2024-08-06 11:50:20,514 INFO [trainer.py:765] (4/8) Epoch 13, batch 300, train_loss[loss=2.807, ArTop10Accuracy=0.7683, over 14352.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7743, over 9392.08 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,764 INFO [trainer.py:765] (4/8) Epoch 13, batch 400, train_loss[loss=2.714, ArTop10Accuracy=0.785, over 10140.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7749, over 10285.29 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,406 INFO [trainer.py:765] (4/8) Epoch 13, batch 500, train_loss[loss=2.727, ArTop10Accuracy=0.7851, over 12174.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7765, over 10845.60 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,222 INFO [trainer.py:765] (4/8) Epoch 13, batch 600, train_loss[loss=2.715, ArTop10Accuracy=0.7804, over 11475.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.775, over 11351.00 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,080 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:55:56,835 INFO [trainer.py:811] (4/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 11:55:57,711 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,464 INFO [trainer.py:765] (4/8) Epoch 13, batch 700, train_loss[loss=2.75, ArTop10Accuracy=0.7831, over 10083.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7747, over 11501.17 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 11:57:46,682 INFO [trainer.py:765] (4/8) Epoch 13, batch 800, train_loss[loss=2.675, ArTop10Accuracy=0.7886, over 10248.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7744, over 11619.44 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,286 INFO [trainer.py:765] (4/8) Epoch 13, batch 900, train_loss[loss=2.771, ArTop10Accuracy=0.7767, over 12915.00 frames. ], tot_loss[loss=2.775, ArTop10Accuracy=0.7752, over 11675.58 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,173 INFO [trainer.py:765] (4/8) Epoch 13, batch 1000, train_loss[loss=2.828, ArTop10Accuracy=0.7686, over 12804.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7734, over 11872.36 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,880 INFO [trainer.py:765] (4/8) Epoch 13, batch 1100, train_loss[loss=2.804, ArTop10Accuracy=0.7651, over 13485.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7723, over 11951.26 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,662 INFO [trainer.py:765] (4/8) Epoch 13, batch 1200, train_loss[loss=2.902, ArTop10Accuracy=0.7484, over 12114.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7723, over 11865.52 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,339 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:05:45,333 INFO [trainer.py:765] (4/8) Epoch 14, batch 100, train_loss[loss=2.835, ArTop10Accuracy=0.762, over 14472.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7746, over 4782.62 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,602 INFO [trainer.py:765] (4/8) Epoch 14, batch 200, train_loss[loss=2.814, ArTop10Accuracy=0.7662, over 13683.00 frames. ], tot_loss[loss=2.773, ArTop10Accuracy=0.7753, over 7786.17 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,310 INFO [trainer.py:765] (4/8) Epoch 14, batch 300, train_loss[loss=2.756, ArTop10Accuracy=0.7761, over 14625.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7768, over 9408.24 frames. ], batch size: 45, lr: 8.66e-03 +2024-08-06 12:10:01,130 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,225 INFO [trainer.py:765] (4/8) Epoch 14, batch 400, train_loss[loss=2.663, ArTop10Accuracy=0.7999, over 10509.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7769, over 10317.67 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 12:11:36,149 INFO [trainer.py:765] (4/8) Epoch 14, batch 500, train_loss[loss=2.836, ArTop10Accuracy=0.7666, over 12150.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7771, over 10867.61 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,992 INFO [trainer.py:765] (4/8) Epoch 14, batch 600, train_loss[loss=2.737, ArTop10Accuracy=0.7799, over 11397.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.777, over 11388.73 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,553 INFO [trainer.py:765] (4/8) Epoch 14, batch 700, train_loss[loss=2.761, ArTop10Accuracy=0.7818, over 9318.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7759, over 11531.31 frames. ], batch size: 11, lr: 8.57e-03 +2024-08-06 12:15:58,068 INFO [trainer.py:765] (4/8) Epoch 14, batch 800, train_loss[loss=2.574, ArTop10Accuracy=0.8119, over 10068.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7752, over 11637.37 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 12:17:12,864 INFO [trainer.py:765] (4/8) Epoch 14, batch 900, train_loss[loss=2.758, ArTop10Accuracy=0.7791, over 13287.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7766, over 11696.62 frames. ], batch size: 28, lr: 8.52e-03 +2024-08-06 12:18:29,613 INFO [trainer.py:765] (4/8) Epoch 14, batch 1000, train_loss[loss=2.746, ArTop10Accuracy=0.7813, over 12909.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7758, over 11892.84 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,375 INFO [trainer.py:765] (4/8) Epoch 14, batch 1100, train_loss[loss=2.739, ArTop10Accuracy=0.7804, over 13647.00 frames. ], tot_loss[loss=2.775, ArTop10Accuracy=0.7752, over 11926.48 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,277 INFO [trainer.py:765] (4/8) Epoch 14, batch 1200, train_loss[loss=2.904, ArTop10Accuracy=0.7477, over 12768.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7754, over 11863.44 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:58,313 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:23:51,961 INFO [trainer.py:765] (4/8) Epoch 15, batch 100, train_loss[loss=2.757, ArTop10Accuracy=0.7769, over 14058.00 frames. ], tot_loss[loss=2.763, ArTop10Accuracy=0.7767, over 4741.67 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,599 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (4/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 12:24:11,094 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,988 INFO [trainer.py:765] (4/8) Epoch 15, batch 200, train_loss[loss=2.727, ArTop10Accuracy=0.7861, over 13497.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7786, over 7747.87 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,694 INFO [trainer.py:765] (4/8) Epoch 15, batch 300, train_loss[loss=2.79, ArTop10Accuracy=0.7734, over 14127.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7789, over 9366.22 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,533 INFO [trainer.py:765] (4/8) Epoch 15, batch 400, train_loss[loss=2.737, ArTop10Accuracy=0.7757, over 10197.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7798, over 10275.21 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,032 INFO [trainer.py:765] (4/8) Epoch 15, batch 500, train_loss[loss=2.684, ArTop10Accuracy=0.7923, over 11910.00 frames. ], tot_loss[loss=2.745, ArTop10Accuracy=0.7806, over 10839.43 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,292 INFO [trainer.py:765] (4/8) Epoch 15, batch 600, train_loss[loss=2.711, ArTop10Accuracy=0.7829, over 11328.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7795, over 11360.31 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,175 INFO [trainer.py:765] (4/8) Epoch 15, batch 700, train_loss[loss=2.798, ArTop10Accuracy=0.7657, over 9354.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7787, over 11509.82 frames. ], batch size: 11, lr: 8.01e-03 +2024-08-06 12:34:18,254 INFO [trainer.py:765] (4/8) Epoch 15, batch 800, train_loss[loss=2.694, ArTop10Accuracy=0.7858, over 9429.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7778, over 11617.64 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 12:35:34,726 INFO [trainer.py:765] (4/8) Epoch 15, batch 900, train_loss[loss=2.779, ArTop10Accuracy=0.7811, over 13008.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7789, over 11663.27 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,540 INFO [trainer.py:765] (4/8) Epoch 15, batch 1000, train_loss[loss=2.755, ArTop10Accuracy=0.7811, over 12786.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7782, over 11867.55 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,179 INFO [trainer.py:765] (4/8) Epoch 15, batch 1100, train_loss[loss=2.727, ArTop10Accuracy=0.781, over 13656.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7768, over 11960.61 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,841 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,788 INFO [trainer.py:765] (4/8) Epoch 15, batch 1200, train_loss[loss=2.875, ArTop10Accuracy=0.7581, over 12324.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7764, over 11867.43 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,729 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:42:17,617 INFO [trainer.py:765] (4/8) Epoch 16, batch 100, train_loss[loss=2.72, ArTop10Accuracy=0.7843, over 14628.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7785, over 4756.83 frames. 
], batch size: 63, lr: 7.63e-03 +2024-08-06 12:43:49,563 INFO [trainer.py:765] (4/8) Epoch 16, batch 200, train_loss[loss=2.772, ArTop10Accuracy=0.7808, over 13596.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7796, over 7758.14 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,501 INFO [trainer.py:765] (4/8) Epoch 16, batch 300, train_loss[loss=2.786, ArTop10Accuracy=0.7746, over 14376.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7808, over 9384.75 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,207 INFO [trainer.py:765] (4/8) Epoch 16, batch 400, train_loss[loss=2.673, ArTop10Accuracy=0.7931, over 10800.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7816, over 10273.40 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 12:48:16,309 INFO [trainer.py:765] (4/8) Epoch 16, batch 500, train_loss[loss=2.668, ArTop10Accuracy=0.7959, over 12543.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7828, over 10823.89 frames. ], batch size: 23, lr: 7.56e-03 +2024-08-06 12:49:46,641 INFO [trainer.py:765] (4/8) Epoch 16, batch 600, train_loss[loss=2.696, ArTop10Accuracy=0.7945, over 11832.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7818, over 11356.77 frames. ], batch size: 19, lr: 7.54e-03 +2024-08-06 12:51:23,681 INFO [trainer.py:765] (4/8) Epoch 16, batch 700, train_loss[loss=2.622, ArTop10Accuracy=0.8066, over 9279.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7812, over 11496.79 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 12:52:43,500 INFO [trainer.py:765] (4/8) Epoch 16, batch 800, train_loss[loss=2.665, ArTop10Accuracy=0.7968, over 9534.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7802, over 11622.91 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,015 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:53:15,497 INFO [trainer.py:811] (4/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,497 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 12:53:16,186 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,480 INFO [trainer.py:765] (4/8) Epoch 16, batch 900, train_loss[loss=2.758, ArTop10Accuracy=0.7755, over 12792.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7814, over 11673.14 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,790 INFO [trainer.py:765] (4/8) Epoch 16, batch 1000, train_loss[loss=2.729, ArTop10Accuracy=0.7823, over 12786.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7803, over 11883.80 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,162 INFO [trainer.py:765] (4/8) Epoch 16, batch 1100, train_loss[loss=2.841, ArTop10Accuracy=0.761, over 13731.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7788, over 11965.31 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,484 INFO [trainer.py:765] (4/8) Epoch 16, batch 1200, train_loss[loss=2.889, ArTop10Accuracy=0.7509, over 13242.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7784, over 11864.74 frames. ], batch size: 101, lr: 7.44e-03 +2024-08-06 12:58:48,452 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:00:47,899 INFO [trainer.py:765] (4/8) Epoch 17, batch 100, train_loss[loss=2.808, ArTop10Accuracy=0.7735, over 14139.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.782, over 4762.26 frames. 
], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,301 INFO [trainer.py:765] (4/8) Epoch 17, batch 200, train_loss[loss=2.696, ArTop10Accuracy=0.7905, over 13575.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.783, over 7754.67 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,516 INFO [trainer.py:765] (4/8) Epoch 17, batch 300, train_loss[loss=2.778, ArTop10Accuracy=0.774, over 14085.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 9361.00 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,759 INFO [trainer.py:765] (4/8) Epoch 17, batch 400, train_loss[loss=2.697, ArTop10Accuracy=0.7889, over 10224.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7835, over 10286.59 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,020 INFO [trainer.py:765] (4/8) Epoch 17, batch 500, train_loss[loss=2.703, ArTop10Accuracy=0.7954, over 12390.00 frames. ], tot_loss[loss=2.722, ArTop10Accuracy=0.7849, over 10860.98 frames. ], batch size: 23, lr: 7.12e-03 +2024-08-06 13:07:39,878 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,687 INFO [trainer.py:765] (4/8) Epoch 17, batch 600, train_loss[loss=2.644, ArTop10Accuracy=0.8019, over 11319.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11399.75 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,835 INFO [trainer.py:765] (4/8) Epoch 17, batch 700, train_loss[loss=2.647, ArTop10Accuracy=0.7977, over 9441.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7829, over 11531.09 frames. ], batch size: 11, lr: 7.09e-03 +2024-08-06 13:11:19,480 INFO [trainer.py:765] (4/8) Epoch 17, batch 800, train_loss[loss=2.671, ArTop10Accuracy=0.7933, over 9414.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7824, over 11649.00 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 13:12:35,669 INFO [trainer.py:765] (4/8) Epoch 17, batch 900, train_loss[loss=2.667, ArTop10Accuracy=0.7941, over 12930.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7833, over 11681.78 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,061 INFO [trainer.py:765] (4/8) Epoch 17, batch 1000, train_loss[loss=2.74, ArTop10Accuracy=0.7801, over 13290.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7819, over 11875.05 frames. ], batch size: 28, lr: 7.04e-03 +2024-08-06 13:15:08,483 INFO [trainer.py:765] (4/8) Epoch 17, batch 1100, train_loss[loss=2.772, ArTop10Accuracy=0.7688, over 13890.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7805, over 11955.85 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,387 INFO [trainer.py:765] (4/8) Epoch 17, batch 1200, train_loss[loss=2.87, ArTop10Accuracy=0.7565, over 12078.00 frames. ], tot_loss[loss=2.745, ArTop10Accuracy=0.7806, over 11841.53 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:21,505 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:19:15,993 INFO [trainer.py:765] (4/8) Epoch 18, batch 100, train_loss[loss=2.768, ArTop10Accuracy=0.7747, over 14724.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7841, over 4762.63 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,601 INFO [trainer.py:765] (4/8) Epoch 18, batch 200, train_loss[loss=2.718, ArTop10Accuracy=0.7839, over 13710.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7852, over 7740.19 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,104 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 13:22:05,473 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,580 INFO [trainer.py:765] (4/8) Epoch 18, batch 300, train_loss[loss=2.816, ArTop10Accuracy=0.7642, over 14331.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7853, over 9360.96 frames. ], batch size: 45, lr: 6.76e-03 +2024-08-06 13:23:57,929 INFO [trainer.py:765] (4/8) Epoch 18, batch 400, train_loss[loss=2.63, ArTop10Accuracy=0.8021, over 10269.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7853, over 10295.59 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,012 INFO [trainer.py:765] (4/8) Epoch 18, batch 500, train_loss[loss=2.756, ArTop10Accuracy=0.7784, over 12132.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7855, over 10847.92 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,633 INFO [trainer.py:765] (4/8) Epoch 18, batch 600, train_loss[loss=2.646, ArTop10Accuracy=0.8065, over 11325.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7854, over 11377.58 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,581 INFO [trainer.py:765] (4/8) Epoch 18, batch 700, train_loss[loss=2.732, ArTop10Accuracy=0.7826, over 10032.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.785, over 11521.88 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,984 INFO [trainer.py:765] (4/8) Epoch 18, batch 800, train_loss[loss=2.64, ArTop10Accuracy=0.804, over 9444.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7844, over 11624.46 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 13:31:12,518 INFO [trainer.py:765] (4/8) Epoch 18, batch 900, train_loss[loss=2.733, ArTop10Accuracy=0.7867, over 13254.00 frames. ], tot_loss[loss=2.722, ArTop10Accuracy=0.7851, over 11690.93 frames. ], batch size: 28, lr: 6.67e-03 +2024-08-06 13:32:26,550 INFO [trainer.py:765] (4/8) Epoch 18, batch 1000, train_loss[loss=2.754, ArTop10Accuracy=0.7792, over 12873.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7838, over 11892.74 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,496 INFO [trainer.py:765] (4/8) Epoch 18, batch 1100, train_loss[loss=2.731, ArTop10Accuracy=0.7878, over 13854.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7828, over 11966.56 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,673 INFO [trainer.py:765] (4/8) Epoch 18, batch 1200, train_loss[loss=2.876, ArTop10Accuracy=0.755, over 11688.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7828, over 11879.62 frames. ], batch size: 103, lr: 6.63e-03 +2024-08-06 13:35:51,064 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,218 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:37:48,623 INFO [trainer.py:765] (4/8) Epoch 19, batch 100, train_loss[loss=2.786, ArTop10Accuracy=0.773, over 14562.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7871, over 4763.34 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,256 INFO [trainer.py:765] (4/8) Epoch 19, batch 200, train_loss[loss=2.706, ArTop10Accuracy=0.782, over 13527.00 frames. ], tot_loss[loss=2.711, ArTop10Accuracy=0.7867, over 7744.85 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,358 INFO [trainer.py:765] (4/8) Epoch 19, batch 300, train_loss[loss=2.735, ArTop10Accuracy=0.7868, over 14472.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7871, over 9377.25 frames. ], batch size: 46, lr: 6.40e-03 +2024-08-06 13:42:21,067 INFO [trainer.py:765] (4/8) Epoch 19, batch 400, train_loss[loss=2.586, ArTop10Accuracy=0.8117, over 10197.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7883, over 10290.26 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,954 INFO [trainer.py:765] (4/8) Epoch 19, batch 500, train_loss[loss=2.667, ArTop10Accuracy=0.7974, over 12102.00 frames. ], tot_loss[loss=2.697, ArTop10Accuracy=0.7896, over 10853.44 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,681 INFO [trainer.py:765] (4/8) Epoch 19, batch 600, train_loss[loss=2.625, ArTop10Accuracy=0.8068, over 11361.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7886, over 11367.34 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,324 INFO [trainer.py:765] (4/8) Epoch 19, batch 700, train_loss[loss=2.687, ArTop10Accuracy=0.7831, over 10386.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7867, over 11508.06 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,883 INFO [trainer.py:765] (4/8) Epoch 19, batch 800, train_loss[loss=2.696, ArTop10Accuracy=0.7907, over 10185.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7858, over 11635.75 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,258 INFO [trainer.py:765] (4/8) Epoch 19, batch 900, train_loss[loss=2.68, ArTop10Accuracy=0.7937, over 12957.00 frames. ], tot_loss[loss=2.711, ArTop10Accuracy=0.7868, over 11686.07 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,653 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:50:50,537 INFO [trainer.py:811] (4/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,537 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32729MB +2024-08-06 13:50:51,489 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,915 INFO [trainer.py:765] (4/8) Epoch 19, batch 1000, train_loss[loss=2.761, ArTop10Accuracy=0.7747, over 12699.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7853, over 11884.10 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,265 INFO [trainer.py:765] (4/8) Epoch 19, batch 1100, train_loss[loss=2.701, ArTop10Accuracy=0.7904, over 13695.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7845, over 11953.71 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,313 INFO [trainer.py:765] (4/8) Epoch 19, batch 1200, train_loss[loss=2.831, ArTop10Accuracy=0.7577, over 12249.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7842, over 11861.23 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:21,708 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:56:12,907 INFO [trainer.py:765] (4/8) Epoch 20, batch 100, train_loss[loss=2.789, ArTop10Accuracy=0.7679, over 14760.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7857, over 4756.56 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,497 INFO [trainer.py:765] (4/8) Epoch 20, batch 200, train_loss[loss=2.639, ArTop10Accuracy=0.8007, over 13737.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7879, over 7746.44 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,430 INFO [trainer.py:765] (4/8) Epoch 20, batch 300, train_loss[loss=2.762, ArTop10Accuracy=0.7798, over 14253.00 frames. ], tot_loss[loss=2.699, ArTop10Accuracy=0.789, over 9366.73 frames. ], batch size: 45, lr: 6.08e-03 +2024-08-06 14:00:44,356 INFO [trainer.py:765] (4/8) Epoch 20, batch 400, train_loss[loss=2.555, ArTop10Accuracy=0.8139, over 10905.00 frames. ], tot_loss[loss=2.696, ArTop10Accuracy=0.7895, over 10302.20 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 14:02:14,855 INFO [trainer.py:765] (4/8) Epoch 20, batch 500, train_loss[loss=2.66, ArTop10Accuracy=0.7958, over 12114.00 frames. ], tot_loss[loss=2.692, ArTop10Accuracy=0.7904, over 10858.12 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,856 INFO [trainer.py:765] (4/8) Epoch 20, batch 600, train_loss[loss=2.597, ArTop10Accuracy=0.8091, over 11571.00 frames. ], tot_loss[loss=2.695, ArTop10Accuracy=0.7899, over 11385.90 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,864 INFO [trainer.py:765] (4/8) Epoch 20, batch 700, train_loss[loss=2.717, ArTop10Accuracy=0.7839, over 9984.00 frames. ], tot_loss[loss=2.699, ArTop10Accuracy=0.7892, over 11521.50 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 14:05:30,791 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,509 INFO [trainer.py:765] (4/8) Epoch 20, batch 800, train_loss[loss=2.721, ArTop10Accuracy=0.7837, over 10083.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7881, over 11637.46 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 14:07:50,944 INFO [trainer.py:765] (4/8) Epoch 20, batch 900, train_loss[loss=2.635, ArTop10Accuracy=0.8005, over 12861.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7881, over 11700.37 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,173 INFO [trainer.py:765] (4/8) Epoch 20, batch 1000, train_loss[loss=2.693, ArTop10Accuracy=0.7967, over 12675.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7876, over 11883.15 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,210 INFO [trainer.py:765] (4/8) Epoch 20, batch 1100, train_loss[loss=2.709, ArTop10Accuracy=0.7851, over 13629.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7864, over 11931.12 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,813 INFO [trainer.py:765] (4/8) Epoch 20, batch 1200, train_loss[loss=2.855, ArTop10Accuracy=0.7594, over 11973.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7863, over 11830.00 frames. ], batch size: 105, lr: 5.98e-03 +2024-08-06 14:12:37,299 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 14:12:37,301 INFO [trainer.py:1069] (4/8) Done! 
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-5 b/libritts-r/log/log-train-2024-08-06-08-06-14-5 new file mode 100644 index 0000000000000000000000000000000000000000..5773d2f9e1159c86f2ee868d5f7ae6316f58c0d4 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-5 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,312 INFO [trainer.py:870] (5/8) Training started +2024-08-06 08:06:14,313 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 08:06:14,314 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,314 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 08:06:15,008 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,222 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 08:06:19,151 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 08:06:19,153 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 08:06:19,155 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 08:06:19,155 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 08:06:19,155 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,766 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 08:06:19,766 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 08:06:20,091 INFO [datamodule.py:388] (5/8) About to create dev dataloader +2024-08-06 
08:08:02,120 INFO [trainer.py:765] (5/8) Epoch 1, batch 100, train_loss[loss=4.267, ArTop10Accuracy=0.5104, over 13962.00 frames. ], tot_loss[loss=5.049, ArTop10Accuracy=0.3742, over 4764.60 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,828 INFO [trainer.py:765] (5/8) Epoch 1, batch 200, train_loss[loss=4.009, ArTop10Accuracy=0.5501, over 13701.00 frames. ], tot_loss[loss=4.489, ArTop10Accuracy=0.4683, over 7752.76 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,429 INFO [trainer.py:765] (5/8) Epoch 1, batch 300, train_loss[loss=3.902, ArTop10Accuracy=0.5643, over 14151.00 frames. ], tot_loss[loss=4.21, ArTop10Accuracy=0.5149, over 9369.31 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,699 INFO [trainer.py:765] (5/8) Epoch 1, batch 400, train_loss[loss=3.705, ArTop10Accuracy=0.605, over 10998.00 frames. ], tot_loss[loss=4.023, ArTop10Accuracy=0.5465, over 10273.59 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 08:13:40,050 INFO [trainer.py:765] (5/8) Epoch 1, batch 500, train_loss[loss=3.613, ArTop10Accuracy=0.6219, over 12171.00 frames. ], tot_loss[loss=3.878, ArTop10Accuracy=0.5715, over 10848.61 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,243 INFO [trainer.py:765] (5/8) Epoch 1, batch 600, train_loss[loss=3.56, ArTop10Accuracy=0.6298, over 11346.00 frames. ], tot_loss[loss=3.765, ArTop10Accuracy=0.5916, over 11350.53 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,424 INFO [trainer.py:765] (5/8) Epoch 1, batch 700, train_loss[loss=3.414, ArTop10Accuracy=0.6566, over 10089.00 frames. ], tot_loss[loss=3.691, ArTop10Accuracy=0.6047, over 11494.81 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,017 INFO [trainer.py:765] (5/8) Epoch 1, batch 800, train_loss[loss=3.544, ArTop10Accuracy=0.6297, over 9378.00 frames. ], tot_loss[loss=3.625, ArTop10Accuracy=0.6168, over 11636.05 frames. ], batch size: 11, lr: 2.98e-02 +2024-08-06 08:18:56,151 INFO [trainer.py:765] (5/8) Epoch 1, batch 900, train_loss[loss=3.446, ArTop10Accuracy=0.649, over 12843.00 frames. ], tot_loss[loss=3.57, ArTop10Accuracy=0.6266, over 11681.46 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,862 INFO [trainer.py:765] (5/8) Epoch 1, batch 1000, train_loss[loss=3.474, ArTop10Accuracy=0.6463, over 12768.00 frames. ], tot_loss[loss=3.531, ArTop10Accuracy=0.6336, over 11878.58 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,539 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,155 INFO [trainer.py:765] (5/8) Epoch 1, batch 1100, train_loss[loss=3.411, ArTop10Accuracy=0.6511, over 13878.00 frames. ], tot_loss[loss=3.494, ArTop10Accuracy=0.6401, over 11961.72 frames. ], batch size: 35, lr: 2.96e-02 +2024-08-06 08:22:45,411 INFO [trainer.py:765] (5/8) Epoch 1, batch 1200, train_loss[loss=3.503, ArTop10Accuracy=0.6388, over 11898.00 frames. ], tot_loss[loss=3.465, ArTop10Accuracy=0.6456, over 11873.58 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,288 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:25:36,237 INFO [trainer.py:765] (5/8) Epoch 2, batch 100, train_loss[loss=3.392, ArTop10Accuracy=0.655, over 14622.00 frames. ], tot_loss[loss=3.424, ArTop10Accuracy=0.652, over 4773.72 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,955 INFO [trainer.py:765] (5/8) Epoch 2, batch 200, train_loss[loss=3.378, ArTop10Accuracy=0.6575, over 13425.00 frames. ], tot_loss[loss=3.386, ArTop10Accuracy=0.6597, over 7764.85 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,534 INFO [trainer.py:765] (5/8) Epoch 2, batch 300, train_loss[loss=3.37, ArTop10Accuracy=0.6646, over 14022.00 frames. ], tot_loss[loss=3.371, ArTop10Accuracy=0.6625, over 9389.25 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,637 INFO [trainer.py:765] (5/8) Epoch 2, batch 400, train_loss[loss=3.421, ArTop10Accuracy=0.6513, over 10383.00 frames. ], tot_loss[loss=3.358, ArTop10Accuracy=0.6654, over 10290.14 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 08:31:22,902 INFO [trainer.py:765] (5/8) Epoch 2, batch 500, train_loss[loss=3.412, ArTop10Accuracy=0.6543, over 12753.00 frames. ], tot_loss[loss=3.343, ArTop10Accuracy=0.6681, over 10849.81 frames. ], batch size: 23, lr: 2.87e-02 +2024-08-06 08:32:45,687 INFO [trainer.py:765] (5/8) Epoch 2, batch 600, train_loss[loss=3.332, ArTop10Accuracy=0.67, over 11454.00 frames. ], tot_loss[loss=3.331, ArTop10Accuracy=0.6706, over 11349.06 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,583 INFO [trainer.py:765] (5/8) Epoch 2, batch 700, train_loss[loss=3.326, ArTop10Accuracy=0.6678, over 10104.00 frames. ], tot_loss[loss=3.326, ArTop10Accuracy=0.6716, over 11504.73 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 08:34:31,175 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:34:40,888 INFO [trainer.py:811] (5/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,889 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 28868MB +2024-08-06 08:34:41,700 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,877 INFO [trainer.py:765] (5/8) Epoch 2, batch 800, train_loss[loss=3.238, ArTop10Accuracy=0.6932, over 9108.00 frames. ], tot_loss[loss=3.319, ArTop10Accuracy=0.6731, over 11622.67 frames. ], batch size: 11, lr: 2.84e-02 +2024-08-06 08:36:56,372 INFO [trainer.py:765] (5/8) Epoch 2, batch 900, train_loss[loss=3.164, ArTop10Accuracy=0.6972, over 12777.00 frames. ], tot_loss[loss=3.305, ArTop10Accuracy=0.6756, over 11673.76 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,511 INFO [trainer.py:765] (5/8) Epoch 2, batch 1000, train_loss[loss=3.184, ArTop10Accuracy=0.6983, over 13158.00 frames. ], tot_loss[loss=3.299, ArTop10Accuracy=0.6768, over 11869.74 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,059 INFO [trainer.py:765] (5/8) Epoch 2, batch 1100, train_loss[loss=3.256, ArTop10Accuracy=0.6888, over 13548.00 frames. ], tot_loss[loss=3.292, ArTop10Accuracy=0.6782, over 11932.93 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,220 INFO [trainer.py:765] (5/8) Epoch 2, batch 1200, train_loss[loss=3.328, ArTop10Accuracy=0.6618, over 12423.00 frames. ], tot_loss[loss=3.283, ArTop10Accuracy=0.6796, over 11869.40 frames. ], batch size: 101, lr: 2.80e-02 +2024-08-06 08:41:38,289 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:43:36,649 INFO [trainer.py:765] (5/8) Epoch 3, batch 100, train_loss[loss=3.275, ArTop10Accuracy=0.6816, over 14241.00 frames. ], tot_loss[loss=3.244, ArTop10Accuracy=0.6861, over 4767.69 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,500 INFO [trainer.py:765] (5/8) Epoch 3, batch 200, train_loss[loss=3.273, ArTop10Accuracy=0.6772, over 13731.00 frames. ], tot_loss[loss=3.223, ArTop10Accuracy=0.6902, over 7752.86 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,257 INFO [trainer.py:765] (5/8) Epoch 3, batch 300, train_loss[loss=3.183, ArTop10Accuracy=0.6995, over 14106.00 frames. ], tot_loss[loss=3.205, ArTop10Accuracy=0.6939, over 9389.07 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,218 INFO [trainer.py:765] (5/8) Epoch 3, batch 400, train_loss[loss=3.116, ArTop10Accuracy=0.7163, over 10269.00 frames. ], tot_loss[loss=3.19, ArTop10Accuracy=0.6969, over 10295.87 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 08:48:40,881 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,541 INFO [trainer.py:765] (5/8) Epoch 3, batch 500, train_loss[loss=3.086, ArTop10Accuracy=0.7171, over 12081.00 frames. ], tot_loss[loss=3.174, ArTop10Accuracy=0.7001, over 10836.69 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,476 INFO [trainer.py:765] (5/8) Epoch 3, batch 600, train_loss[loss=3.137, ArTop10Accuracy=0.7034, over 11385.00 frames. ], tot_loss[loss=3.153, ArTop10Accuracy=0.7042, over 11343.41 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,617 INFO [trainer.py:765] (5/8) Epoch 3, batch 700, train_loss[loss=3.084, ArTop10Accuracy=0.7191, over 9357.00 frames. ], tot_loss[loss=3.145, ArTop10Accuracy=0.7056, over 11508.14 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 08:53:57,388 INFO [trainer.py:765] (5/8) Epoch 3, batch 800, train_loss[loss=3.123, ArTop10Accuracy=0.7103, over 9462.00 frames. ], tot_loss[loss=3.138, ArTop10Accuracy=0.7073, over 11610.68 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 08:55:15,117 INFO [trainer.py:765] (5/8) Epoch 3, batch 900, train_loss[loss=2.989, ArTop10Accuracy=0.7359, over 12843.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.7107, over 11648.07 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,557 INFO [trainer.py:765] (5/8) Epoch 3, batch 1000, train_loss[loss=3.13, ArTop10Accuracy=0.7095, over 13044.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.712, over 11857.18 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,505 INFO [trainer.py:765] (5/8) Epoch 3, batch 1100, train_loss[loss=3.133, ArTop10Accuracy=0.7092, over 13536.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.7126, over 11942.38 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,399 INFO [trainer.py:765] (5/8) Epoch 3, batch 1200, train_loss[loss=3.122, ArTop10Accuracy=0.7062, over 11571.00 frames. ], tot_loss[loss=3.098, ArTop10Accuracy=0.7146, over 11849.80 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:01,730 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:01:50,741 INFO [trainer.py:765] (5/8) Epoch 4, batch 100, train_loss[loss=3.116, ArTop10Accuracy=0.7056, over 14949.00 frames. ], tot_loss[loss=3.071, ArTop10Accuracy=0.7187, over 4748.08 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,858 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,385 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29481MB +2024-08-06 09:03:03,364 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,273 INFO [trainer.py:765] (5/8) Epoch 4, batch 200, train_loss[loss=2.949, ArTop10Accuracy=0.744, over 13677.00 frames. ], tot_loss[loss=3.048, ArTop10Accuracy=0.7237, over 7770.03 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,732 INFO [trainer.py:765] (5/8) Epoch 4, batch 300, train_loss[loss=3.044, ArTop10Accuracy=0.7282, over 14460.00 frames. ], tot_loss[loss=3.036, ArTop10Accuracy=0.7261, over 9385.26 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 09:06:28,150 INFO [trainer.py:765] (5/8) Epoch 4, batch 400, train_loss[loss=2.864, ArTop10Accuracy=0.7614, over 10161.00 frames. ], tot_loss[loss=3.032, ArTop10Accuracy=0.7271, over 10287.27 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 09:08:01,924 INFO [trainer.py:765] (5/8) Epoch 4, batch 500, train_loss[loss=2.968, ArTop10Accuracy=0.7405, over 12393.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.729, over 10846.36 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,540 INFO [trainer.py:765] (5/8) Epoch 4, batch 600, train_loss[loss=3.045, ArTop10Accuracy=0.7254, over 11475.00 frames. ], tot_loss[loss=3.019, ArTop10Accuracy=0.7295, over 11367.69 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,865 INFO [trainer.py:765] (5/8) Epoch 4, batch 700, train_loss[loss=2.952, ArTop10Accuracy=0.7418, over 10227.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7287, over 11527.79 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,512 INFO [trainer.py:765] (5/8) Epoch 4, batch 800, train_loss[loss=3.026, ArTop10Accuracy=0.7246, over 9501.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7287, over 11616.75 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 09:13:33,212 INFO [trainer.py:765] (5/8) Epoch 4, batch 900, train_loss[loss=2.961, ArTop10Accuracy=0.7415, over 12981.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7305, over 11675.99 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,519 INFO [trainer.py:765] (5/8) Epoch 4, batch 1000, train_loss[loss=3.037, ArTop10Accuracy=0.7291, over 12837.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.731, over 11879.91 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,981 INFO [trainer.py:765] (5/8) Epoch 4, batch 1100, train_loss[loss=3.094, ArTop10Accuracy=0.7178, over 13704.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7306, over 11943.31 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,291 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,344 INFO [trainer.py:765] (5/8) Epoch 4, batch 1200, train_loss[loss=3.051, ArTop10Accuracy=0.7233, over 12387.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7312, over 11857.01 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,719 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:20:17,170 INFO [trainer.py:765] (5/8) Epoch 5, batch 100, train_loss[loss=3.023, ArTop10Accuracy=0.731, over 14667.00 frames. ], tot_loss[loss=2.989, ArTop10Accuracy=0.7344, over 4786.09 frames. 
], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,295 INFO [trainer.py:765] (5/8) Epoch 5, batch 200, train_loss[loss=2.991, ArTop10Accuracy=0.7299, over 13593.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.736, over 7754.44 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,240 INFO [trainer.py:765] (5/8) Epoch 5, batch 300, train_loss[loss=2.998, ArTop10Accuracy=0.7285, over 14391.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7377, over 9395.87 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,536 INFO [trainer.py:765] (5/8) Epoch 5, batch 400, train_loss[loss=2.832, ArTop10Accuracy=0.7682, over 10143.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7386, over 10302.88 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,417 INFO [trainer.py:765] (5/8) Epoch 5, batch 500, train_loss[loss=3.012, ArTop10Accuracy=0.7274, over 12156.00 frames. ], tot_loss[loss=2.965, ArTop10Accuracy=0.7393, over 10871.70 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,537 INFO [trainer.py:765] (5/8) Epoch 5, batch 600, train_loss[loss=2.916, ArTop10Accuracy=0.7567, over 11493.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7399, over 11392.96 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,669 INFO [trainer.py:765] (5/8) Epoch 5, batch 700, train_loss[loss=2.98, ArTop10Accuracy=0.7363, over 10116.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7392, over 11554.05 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 09:30:44,692 INFO [trainer.py:765] (5/8) Epoch 5, batch 800, train_loss[loss=3.029, ArTop10Accuracy=0.7166, over 10044.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7391, over 11668.31 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 09:31:51,238 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:32:00,762 INFO [trainer.py:811] (5/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,763 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29481MB +2024-08-06 09:32:01,708 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,553 INFO [trainer.py:765] (5/8) Epoch 5, batch 900, train_loss[loss=2.925, ArTop10Accuracy=0.7434, over 12921.00 frames. ], tot_loss[loss=2.959, ArTop10Accuracy=0.7408, over 11719.21 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,323 INFO [trainer.py:765] (5/8) Epoch 5, batch 1000, train_loss[loss=2.894, ArTop10Accuracy=0.7564, over 12846.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7406, over 11894.84 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,300 INFO [trainer.py:765] (5/8) Epoch 5, batch 1100, train_loss[loss=2.979, ArTop10Accuracy=0.7327, over 13755.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7394, over 11957.99 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,331 INFO [trainer.py:765] (5/8) Epoch 5, batch 1200, train_loss[loss=3.056, ArTop10Accuracy=0.7168, over 12480.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7402, over 11875.87 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,627 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:38:52,664 INFO [trainer.py:765] (5/8) Epoch 6, batch 100, train_loss[loss=3.02, ArTop10Accuracy=0.7256, over 14367.00 frames. ], tot_loss[loss=2.952, ArTop10Accuracy=0.7414, over 4758.76 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,833 INFO [trainer.py:765] (5/8) Epoch 6, batch 200, train_loss[loss=2.915, ArTop10Accuracy=0.7538, over 13674.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7438, over 7757.35 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,964 INFO [trainer.py:765] (5/8) Epoch 6, batch 300, train_loss[loss=2.919, ArTop10Accuracy=0.7497, over 14718.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7457, over 9372.92 frames. ], batch size: 45, lr: 1.83e-02 +2024-08-06 09:43:17,827 INFO [trainer.py:765] (5/8) Epoch 6, batch 400, train_loss[loss=2.957, ArTop10Accuracy=0.7436, over 10209.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7461, over 10291.45 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,128 INFO [trainer.py:765] (5/8) Epoch 6, batch 500, train_loss[loss=2.95, ArTop10Accuracy=0.7403, over 12219.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.748, over 10847.88 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,872 INFO [trainer.py:765] (5/8) Epoch 6, batch 600, train_loss[loss=2.853, ArTop10Accuracy=0.7617, over 11301.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7483, over 11379.44 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,219 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,870 INFO [trainer.py:765] (5/8) Epoch 6, batch 700, train_loss[loss=2.774, ArTop10Accuracy=0.7798, over 10119.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7473, over 11527.84 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,954 INFO [trainer.py:765] (5/8) Epoch 6, batch 800, train_loss[loss=2.951, ArTop10Accuracy=0.7472, over 10164.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7466, over 11640.78 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,135 INFO [trainer.py:765] (5/8) Epoch 6, batch 900, train_loss[loss=2.91, ArTop10Accuracy=0.7532, over 12720.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7477, over 11692.38 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,298 INFO [trainer.py:765] (5/8) Epoch 6, batch 1000, train_loss[loss=2.88, ArTop10Accuracy=0.7574, over 12849.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7472, over 11888.94 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,920 INFO [trainer.py:765] (5/8) Epoch 6, batch 1100, train_loss[loss=2.869, ArTop10Accuracy=0.7568, over 13800.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7463, over 11949.08 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,336 INFO [trainer.py:765] (5/8) Epoch 6, batch 1200, train_loss[loss=2.998, ArTop10Accuracy=0.7337, over 12078.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7469, over 11859.24 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,309 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:57:06,698 INFO [trainer.py:765] (5/8) Epoch 7, batch 100, train_loss[loss=2.986, ArTop10Accuracy=0.7338, over 14781.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7488, over 4773.34 frames. ], batch size: 64, lr: 1.64e-02 +2024-08-06 09:58:39,425 INFO [trainer.py:765] (5/8) Epoch 7, batch 200, train_loss[loss=2.944, ArTop10Accuracy=0.7442, over 13797.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7506, over 7754.99 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,083 INFO [trainer.py:765] (5/8) Epoch 7, batch 300, train_loss[loss=2.971, ArTop10Accuracy=0.7391, over 14244.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7514, over 9363.68 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,510 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29481MB +2024-08-06 10:00:50,976 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,117 INFO [trainer.py:765] (5/8) Epoch 7, batch 400, train_loss[loss=2.894, ArTop10Accuracy=0.753, over 10830.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7526, over 10274.54 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 10:03:21,459 INFO [trainer.py:765] (5/8) Epoch 7, batch 500, train_loss[loss=2.883, ArTop10Accuracy=0.7543, over 12246.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7534, over 10842.54 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,882 INFO [trainer.py:765] (5/8) Epoch 7, batch 600, train_loss[loss=2.761, ArTop10Accuracy=0.7776, over 11928.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7535, over 11391.82 frames. ], batch size: 19, lr: 1.61e-02 +2024-08-06 10:06:25,111 INFO [trainer.py:765] (5/8) Epoch 7, batch 700, train_loss[loss=2.848, ArTop10Accuracy=0.7626, over 10137.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7521, over 11519.69 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,948 INFO [trainer.py:765] (5/8) Epoch 7, batch 800, train_loss[loss=2.722, ArTop10Accuracy=0.7914, over 10032.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7523, over 11636.71 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,824 INFO [trainer.py:765] (5/8) Epoch 7, batch 900, train_loss[loss=2.903, ArTop10Accuracy=0.7506, over 13089.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7534, over 11693.06 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,635 INFO [trainer.py:765] (5/8) Epoch 7, batch 1000, train_loss[loss=2.899, ArTop10Accuracy=0.7489, over 12903.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7529, over 11887.87 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,208 INFO [trainer.py:765] (5/8) Epoch 7, batch 1100, train_loss[loss=2.961, ArTop10Accuracy=0.7398, over 13668.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7522, over 11947.64 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,205 INFO [trainer.py:765] (5/8) Epoch 7, batch 1200, train_loss[loss=3.019, ArTop10Accuracy=0.7253, over 12288.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7522, over 11875.89 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,878 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:15:03,601 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,821 INFO [trainer.py:765] (5/8) Epoch 8, batch 100, train_loss[loss=2.939, ArTop10Accuracy=0.7438, over 14424.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7551, over 4781.58 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,861 INFO [trainer.py:765] (5/8) Epoch 8, batch 200, train_loss[loss=2.922, ArTop10Accuracy=0.7507, over 13815.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7568, over 7780.65 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,898 INFO [trainer.py:765] (5/8) Epoch 8, batch 300, train_loss[loss=2.912, ArTop10Accuracy=0.743, over 14070.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7575, over 9380.95 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,342 INFO [trainer.py:765] (5/8) Epoch 8, batch 400, train_loss[loss=2.768, ArTop10Accuracy=0.7759, over 10383.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7579, over 10283.30 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 10:21:32,411 INFO [trainer.py:765] (5/8) Epoch 8, batch 500, train_loss[loss=2.79, ArTop10Accuracy=0.7696, over 12282.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7583, over 10854.83 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 10:23:00,974 INFO [trainer.py:765] (5/8) Epoch 8, batch 600, train_loss[loss=2.803, ArTop10Accuracy=0.769, over 11862.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7586, over 11351.68 frames. ], batch size: 19, lr: 1.44e-02 +2024-08-06 10:24:37,787 INFO [trainer.py:765] (5/8) Epoch 8, batch 700, train_loss[loss=2.795, ArTop10Accuracy=0.7727, over 9330.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7574, over 11504.70 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:25:56,085 INFO [trainer.py:765] (5/8) Epoch 8, batch 800, train_loss[loss=2.741, ArTop10Accuracy=0.7868, over 10188.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7567, over 11641.71 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:27:12,244 INFO [trainer.py:765] (5/8) Epoch 8, batch 900, train_loss[loss=2.89, ArTop10Accuracy=0.7553, over 12810.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7581, over 11686.35 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:28:25,263 INFO [trainer.py:765] (5/8) Epoch 8, batch 1000, train_loss[loss=2.892, ArTop10Accuracy=0.7476, over 13041.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7578, over 11879.42 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,155 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:29:16,831 INFO [trainer.py:811] (5/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 32717MB +2024-08-06 10:29:17,490 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,730 INFO [trainer.py:765] (5/8) Epoch 8, batch 1100, train_loss[loss=2.859, ArTop10Accuracy=0.7609, over 13695.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7563, over 11977.45 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,945 INFO [trainer.py:765] (5/8) Epoch 8, batch 1200, train_loss[loss=3.031, ArTop10Accuracy=0.7265, over 12033.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7565, over 11874.16 frames. ], batch size: 101, lr: 1.40e-02 +2024-08-06 10:32:05,333 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:34:01,255 INFO [trainer.py:765] (5/8) Epoch 9, batch 100, train_loss[loss=2.908, ArTop10Accuracy=0.7534, over 14391.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7583, over 4756.33 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,771 INFO [trainer.py:765] (5/8) Epoch 9, batch 200, train_loss[loss=2.846, ArTop10Accuracy=0.761, over 13746.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7598, over 7743.84 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,927 INFO [trainer.py:765] (5/8) Epoch 9, batch 300, train_loss[loss=2.858, ArTop10Accuracy=0.7584, over 14196.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7611, over 9395.83 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,696 INFO [trainer.py:765] (5/8) Epoch 9, batch 400, train_loss[loss=2.8, ArTop10Accuracy=0.7774, over 10344.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7622, over 10296.88 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,255 INFO [trainer.py:765] (5/8) Epoch 9, batch 500, train_loss[loss=2.821, ArTop10Accuracy=0.7724, over 12003.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7627, over 10841.83 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,689 INFO [trainer.py:765] (5/8) Epoch 9, batch 600, train_loss[loss=2.779, ArTop10Accuracy=0.7783, over 11358.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7625, over 11341.87 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,440 INFO [trainer.py:765] (5/8) Epoch 9, batch 700, train_loss[loss=2.838, ArTop10Accuracy=0.7582, over 9384.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7613, over 11516.65 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:44:02,952 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,668 INFO [trainer.py:765] (5/8) Epoch 9, batch 800, train_loss[loss=2.791, ArTop10Accuracy=0.765, over 9273.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7604, over 11630.55 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:45:35,718 INFO [trainer.py:765] (5/8) Epoch 9, batch 900, train_loss[loss=2.822, ArTop10Accuracy=0.7728, over 13212.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7615, over 11669.97 frames. ], batch size: 28, lr: 1.28e-02 +2024-08-06 10:46:51,270 INFO [trainer.py:765] (5/8) Epoch 9, batch 1000, train_loss[loss=2.769, ArTop10Accuracy=0.7772, over 12984.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7609, over 11902.65 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,247 INFO [trainer.py:765] (5/8) Epoch 9, batch 1100, train_loss[loss=2.867, ArTop10Accuracy=0.7579, over 13776.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7596, over 11970.09 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,053 INFO [trainer.py:765] (5/8) Epoch 9, batch 1200, train_loss[loss=2.949, ArTop10Accuracy=0.7393, over 12036.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7596, over 11856.75 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:22,407 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:52:12,325 INFO [trainer.py:765] (5/8) Epoch 10, batch 100, train_loss[loss=2.852, ArTop10Accuracy=0.758, over 14463.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7628, over 4742.65 frames. ], batch size: 62, lr: 1.20e-02 +2024-08-06 10:53:44,585 INFO [trainer.py:765] (5/8) Epoch 10, batch 200, train_loss[loss=2.813, ArTop10Accuracy=0.7699, over 13758.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7645, over 7757.02 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,089 INFO [trainer.py:765] (5/8) Epoch 10, batch 300, train_loss[loss=2.914, ArTop10Accuracy=0.7479, over 14004.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7654, over 9373.26 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,176 INFO [trainer.py:765] (5/8) Epoch 10, batch 400, train_loss[loss=2.773, ArTop10Accuracy=0.7801, over 10770.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7663, over 10273.04 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 10:58:04,937 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:58:14,559 INFO [trainer.py:811] (5/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,560 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 32720MB +2024-08-06 10:58:15,573 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,577 INFO [trainer.py:765] (5/8) Epoch 10, batch 500, train_loss[loss=2.785, ArTop10Accuracy=0.773, over 11997.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7675, over 10852.04 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,814 INFO [trainer.py:765] (5/8) Epoch 10, batch 600, train_loss[loss=2.791, ArTop10Accuracy=0.7766, over 11439.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7666, over 11373.96 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,107 INFO [trainer.py:765] (5/8) Epoch 10, batch 700, train_loss[loss=2.726, ArTop10Accuracy=0.79, over 9531.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7659, over 11496.29 frames. ], batch size: 11, lr: 1.18e-02 +2024-08-06 11:02:36,917 INFO [trainer.py:765] (5/8) Epoch 10, batch 800, train_loss[loss=2.834, ArTop10Accuracy=0.7639, over 9489.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7648, over 11633.51 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 11:03:51,212 INFO [trainer.py:765] (5/8) Epoch 10, batch 900, train_loss[loss=2.879, ArTop10Accuracy=0.7498, over 12795.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7655, over 11693.93 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,351 INFO [trainer.py:765] (5/8) Epoch 10, batch 1000, train_loss[loss=2.797, ArTop10Accuracy=0.7712, over 12963.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7653, over 11888.02 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:06:21,721 INFO [trainer.py:765] (5/8) Epoch 10, batch 1100, train_loss[loss=2.854, ArTop10Accuracy=0.7611, over 13578.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7641, over 11957.09 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,771 INFO [trainer.py:765] (5/8) Epoch 10, batch 1200, train_loss[loss=2.927, ArTop10Accuracy=0.7442, over 12105.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7636, over 11866.59 frames. ], batch size: 101, lr: 1.16e-02 +2024-08-06 11:08:33,901 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:10:29,955 INFO [trainer.py:765] (5/8) Epoch 11, batch 100, train_loss[loss=2.92, ArTop10Accuracy=0.7496, over 14313.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7665, over 4766.81 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,674 INFO [trainer.py:765] (5/8) Epoch 11, batch 200, train_loss[loss=2.854, ArTop10Accuracy=0.7574, over 13833.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7673, over 7738.81 frames. 
], batch size: 35, lr: 1.10e-02 +2024-08-06 11:12:22,826 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,549 INFO [trainer.py:765] (5/8) Epoch 11, batch 300, train_loss[loss=2.892, ArTop10Accuracy=0.757, over 14262.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7685, over 9368.06 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,269 INFO [trainer.py:765] (5/8) Epoch 11, batch 400, train_loss[loss=2.728, ArTop10Accuracy=0.7802, over 10251.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 10282.75 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,637 INFO [trainer.py:765] (5/8) Epoch 11, batch 500, train_loss[loss=2.883, ArTop10Accuracy=0.752, over 12291.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7702, over 10837.26 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,517 INFO [trainer.py:765] (5/8) Epoch 11, batch 600, train_loss[loss=2.792, ArTop10Accuracy=0.7794, over 11952.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7697, over 11348.71 frames. ], batch size: 19, lr: 1.08e-02 +2024-08-06 11:19:34,514 INFO [trainer.py:765] (5/8) Epoch 11, batch 700, train_loss[loss=2.643, ArTop10Accuracy=0.8037, over 10194.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7689, over 11504.54 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 11:20:55,484 INFO [trainer.py:765] (5/8) Epoch 11, batch 800, train_loss[loss=2.71, ArTop10Accuracy=0.797, over 9279.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7677, over 11622.96 frames. ], batch size: 11, lr: 1.07e-02 +2024-08-06 11:22:13,706 INFO [trainer.py:765] (5/8) Epoch 11, batch 900, train_loss[loss=2.818, ArTop10Accuracy=0.7658, over 13197.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7691, over 11678.82 frames. ], batch size: 28, lr: 1.07e-02 +2024-08-06 11:23:31,799 INFO [trainer.py:765] (5/8) Epoch 11, batch 1000, train_loss[loss=2.855, ArTop10Accuracy=0.7635, over 12798.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7684, over 11876.21 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,902 INFO [trainer.py:765] (5/8) Epoch 11, batch 1100, train_loss[loss=2.822, ArTop10Accuracy=0.77, over 13593.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7673, over 11945.29 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,733 INFO [trainer.py:765] (5/8) Epoch 11, batch 1200, train_loss[loss=2.923, ArTop10Accuracy=0.7414, over 12033.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7665, over 11870.12 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,848 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 11:26:26,186 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,681 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:29:03,451 INFO [trainer.py:765] (5/8) Epoch 12, batch 100, train_loss[loss=2.86, ArTop10Accuracy=0.7608, over 14634.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7687, over 4753.89 frames. 
], batch size: 63, lr: 1.01e-02 +2024-08-06 11:30:30,674 INFO [trainer.py:765] (5/8) Epoch 12, batch 200, train_loss[loss=2.759, ArTop10Accuracy=0.7796, over 13662.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7702, over 7750.57 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,655 INFO [trainer.py:765] (5/8) Epoch 12, batch 300, train_loss[loss=2.788, ArTop10Accuracy=0.7695, over 14505.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.772, over 9362.26 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 11:33:30,737 INFO [trainer.py:765] (5/8) Epoch 12, batch 400, train_loss[loss=2.694, ArTop10Accuracy=0.7914, over 10332.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7723, over 10271.04 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,733 INFO [trainer.py:765] (5/8) Epoch 12, batch 500, train_loss[loss=2.766, ArTop10Accuracy=0.7771, over 12225.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.7732, over 10823.88 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,361 INFO [trainer.py:765] (5/8) Epoch 12, batch 600, train_loss[loss=2.677, ArTop10Accuracy=0.792, over 11376.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7719, over 11337.58 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,343 INFO [trainer.py:765] (5/8) Epoch 12, batch 700, train_loss[loss=2.785, ArTop10Accuracy=0.7774, over 9279.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7713, over 11485.09 frames. ], batch size: 11, lr: 9.93e-03 +2024-08-06 11:39:23,610 INFO [trainer.py:765] (5/8) Epoch 12, batch 800, train_loss[loss=2.656, ArTop10Accuracy=0.7993, over 10065.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7704, over 11623.21 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,889 INFO [trainer.py:765] (5/8) Epoch 12, batch 900, train_loss[loss=2.795, ArTop10Accuracy=0.771, over 12933.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7712, over 11693.14 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:13,995 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,189 INFO [trainer.py:765] (5/8) Epoch 12, batch 1000, train_loss[loss=2.816, ArTop10Accuracy=0.7683, over 12939.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7707, over 11895.98 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,320 INFO [trainer.py:765] (5/8) Epoch 12, batch 1100, train_loss[loss=2.788, ArTop10Accuracy=0.7715, over 13668.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7701, over 11955.74 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,155 INFO [trainer.py:765] (5/8) Epoch 12, batch 1200, train_loss[loss=2.949, ArTop10Accuracy=0.7443, over 12600.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7698, over 11871.36 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,265 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:47:26,600 INFO [trainer.py:765] (5/8) Epoch 13, batch 100, train_loss[loss=2.828, ArTop10Accuracy=0.7665, over 14454.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7707, over 4769.49 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,779 INFO [trainer.py:765] (5/8) Epoch 13, batch 200, train_loss[loss=2.763, ArTop10Accuracy=0.7791, over 13644.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.773, over 7764.91 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,516 INFO [trainer.py:765] (5/8) Epoch 13, batch 300, train_loss[loss=2.849, ArTop10Accuracy=0.7621, over 14187.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7741, over 9372.51 frames. ], batch size: 45, lr: 9.31e-03 +2024-08-06 11:51:48,765 INFO [trainer.py:765] (5/8) Epoch 13, batch 400, train_loss[loss=2.674, ArTop10Accuracy=0.7952, over 10284.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7745, over 10272.45 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,407 INFO [trainer.py:765] (5/8) Epoch 13, batch 500, train_loss[loss=2.734, ArTop10Accuracy=0.7823, over 12261.00 frames. ], tot_loss[loss=2.77, ArTop10Accuracy=0.7759, over 10847.65 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,223 INFO [trainer.py:765] (5/8) Epoch 13, batch 600, train_loss[loss=2.73, ArTop10Accuracy=0.7878, over 11385.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7746, over 11374.02 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,080 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:55:56,835 INFO [trainer.py:811] (5/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 11:55:57,712 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,465 INFO [trainer.py:765] (5/8) Epoch 13, batch 700, train_loss[loss=2.767, ArTop10Accuracy=0.7719, over 10383.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7743, over 11511.62 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 11:57:46,684 INFO [trainer.py:765] (5/8) Epoch 13, batch 800, train_loss[loss=2.706, ArTop10Accuracy=0.789, over 10110.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.7736, over 11648.41 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,287 INFO [trainer.py:765] (5/8) Epoch 13, batch 900, train_loss[loss=2.762, ArTop10Accuracy=0.7781, over 12837.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7742, over 11698.79 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,174 INFO [trainer.py:765] (5/8) Epoch 13, batch 1000, train_loss[loss=2.702, ArTop10Accuracy=0.7918, over 13026.00 frames. ], tot_loss[loss=2.783, ArTop10Accuracy=0.774, over 11892.62 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,881 INFO [trainer.py:765] (5/8) Epoch 13, batch 1100, train_loss[loss=2.782, ArTop10Accuracy=0.7729, over 13821.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7723, over 11969.07 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,663 INFO [trainer.py:765] (5/8) Epoch 13, batch 1200, train_loss[loss=2.887, ArTop10Accuracy=0.7551, over 12504.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7727, over 11873.91 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,484 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:05:45,334 INFO [trainer.py:765] (5/8) Epoch 14, batch 100, train_loss[loss=2.834, ArTop10Accuracy=0.7626, over 14121.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7746, over 4755.27 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,604 INFO [trainer.py:765] (5/8) Epoch 14, batch 200, train_loss[loss=2.797, ArTop10Accuracy=0.7675, over 13734.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7765, over 7756.67 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,311 INFO [trainer.py:765] (5/8) Epoch 14, batch 300, train_loss[loss=2.841, ArTop10Accuracy=0.76, over 14163.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7764, over 9360.38 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,130 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,226 INFO [trainer.py:765] (5/8) Epoch 14, batch 400, train_loss[loss=2.808, ArTop10Accuracy=0.7624, over 10140.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7767, over 10281.93 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 12:11:36,150 INFO [trainer.py:765] (5/8) Epoch 14, batch 500, train_loss[loss=2.77, ArTop10Accuracy=0.7749, over 12003.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7782, over 10833.99 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,993 INFO [trainer.py:765] (5/8) Epoch 14, batch 600, train_loss[loss=2.66, ArTop10Accuracy=0.7942, over 11427.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7773, over 11372.99 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,552 INFO [trainer.py:765] (5/8) Epoch 14, batch 700, train_loss[loss=2.681, ArTop10Accuracy=0.7937, over 10236.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7763, over 11512.30 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 12:15:58,069 INFO [trainer.py:765] (5/8) Epoch 14, batch 800, train_loss[loss=2.677, ArTop10Accuracy=0.7916, over 9333.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7759, over 11636.50 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 12:17:12,866 INFO [trainer.py:765] (5/8) Epoch 14, batch 900, train_loss[loss=2.798, ArTop10Accuracy=0.7775, over 12933.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7774, over 11669.10 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,615 INFO [trainer.py:765] (5/8) Epoch 14, batch 1000, train_loss[loss=2.859, ArTop10Accuracy=0.7621, over 13071.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7765, over 11877.89 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,377 INFO [trainer.py:765] (5/8) Epoch 14, batch 1100, train_loss[loss=2.76, ArTop10Accuracy=0.7808, over 13647.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7747, over 11945.73 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,279 INFO [trainer.py:765] (5/8) Epoch 14, batch 1200, train_loss[loss=2.936, ArTop10Accuracy=0.7421, over 11955.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7744, over 11860.24 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:57,889 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:23:51,962 INFO [trainer.py:765] (5/8) Epoch 15, batch 100, train_loss[loss=2.862, ArTop10Accuracy=0.7631, over 14766.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7762, over 4772.35 frames. ], batch size: 63, lr: 8.14e-03 +2024-08-06 12:24:00,598 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (5/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 12:24:11,094 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,989 INFO [trainer.py:765] (5/8) Epoch 15, batch 200, train_loss[loss=2.767, ArTop10Accuracy=0.7776, over 13629.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7778, over 7753.42 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,695 INFO [trainer.py:765] (5/8) Epoch 15, batch 300, train_loss[loss=2.774, ArTop10Accuracy=0.7769, over 13872.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7796, over 9374.66 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,535 INFO [trainer.py:765] (5/8) Epoch 15, batch 400, train_loss[loss=2.682, ArTop10Accuracy=0.7932, over 10188.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7801, over 10289.44 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,031 INFO [trainer.py:765] (5/8) Epoch 15, batch 500, train_loss[loss=2.693, ArTop10Accuracy=0.7891, over 12228.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7803, over 10854.86 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,293 INFO [trainer.py:765] (5/8) Epoch 15, batch 600, train_loss[loss=2.795, ArTop10Accuracy=0.7702, over 11559.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7795, over 11372.03 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,176 INFO [trainer.py:765] (5/8) Epoch 15, batch 700, train_loss[loss=2.946, ArTop10Accuracy=0.7441, over 9426.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7785, over 11509.96 frames. ], batch size: 11, lr: 8.01e-03 +2024-08-06 12:34:18,254 INFO [trainer.py:765] (5/8) Epoch 15, batch 800, train_loss[loss=2.731, ArTop10Accuracy=0.7853, over 10077.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7778, over 11655.46 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 12:35:34,726 INFO [trainer.py:765] (5/8) Epoch 15, batch 900, train_loss[loss=2.748, ArTop10Accuracy=0.7775, over 12918.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7782, over 11703.63 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,540 INFO [trainer.py:765] (5/8) Epoch 15, batch 1000, train_loss[loss=2.679, ArTop10Accuracy=0.7947, over 12975.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7781, over 11898.75 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,179 INFO [trainer.py:765] (5/8) Epoch 15, batch 1100, train_loss[loss=2.721, ArTop10Accuracy=0.7841, over 13785.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7767, over 11973.07 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,841 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,789 INFO [trainer.py:765] (5/8) Epoch 15, batch 1200, train_loss[loss=2.935, ArTop10Accuracy=0.7409, over 12156.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7763, over 11863.35 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,769 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:42:17,619 INFO [trainer.py:765] (5/8) Epoch 16, batch 100, train_loss[loss=2.761, ArTop10Accuracy=0.7795, over 14676.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.779, over 4775.40 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,563 INFO [trainer.py:765] (5/8) Epoch 16, batch 200, train_loss[loss=2.658, ArTop10Accuracy=0.7943, over 13419.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7803, over 7764.60 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,502 INFO [trainer.py:765] (5/8) Epoch 16, batch 300, train_loss[loss=2.827, ArTop10Accuracy=0.7627, over 14460.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7812, over 9390.93 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,209 INFO [trainer.py:765] (5/8) Epoch 16, batch 400, train_loss[loss=2.647, ArTop10Accuracy=0.8053, over 10230.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7819, over 10315.27 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,311 INFO [trainer.py:765] (5/8) Epoch 16, batch 500, train_loss[loss=2.762, ArTop10Accuracy=0.7751, over 12336.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7821, over 10875.67 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,642 INFO [trainer.py:765] (5/8) Epoch 16, batch 600, train_loss[loss=2.76, ArTop10Accuracy=0.7807, over 11430.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7817, over 11381.50 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,681 INFO [trainer.py:765] (5/8) Epoch 16, batch 700, train_loss[loss=2.55, ArTop10Accuracy=0.8138, over 9411.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7817, over 11524.49 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 12:52:43,501 INFO [trainer.py:765] (5/8) Epoch 16, batch 800, train_loss[loss=2.647, ArTop10Accuracy=0.8003, over 10152.00 frames. ], tot_loss[loss=2.745, ArTop10Accuracy=0.7807, over 11650.13 frames. ], batch size: 12, lr: 7.51e-03 +2024-08-06 12:53:06,015 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:53:15,497 INFO [trainer.py:811] (5/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,497 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 12:53:16,186 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,480 INFO [trainer.py:765] (5/8) Epoch 16, batch 900, train_loss[loss=2.733, ArTop10Accuracy=0.7827, over 13089.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7817, over 11714.19 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,791 INFO [trainer.py:765] (5/8) Epoch 16, batch 1000, train_loss[loss=2.714, ArTop10Accuracy=0.7895, over 12903.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7809, over 11908.25 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,163 INFO [trainer.py:765] (5/8) Epoch 16, batch 1100, train_loss[loss=2.734, ArTop10Accuracy=0.7784, over 13548.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7786, over 11968.05 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,485 INFO [trainer.py:765] (5/8) Epoch 16, batch 1200, train_loss[loss=2.914, ArTop10Accuracy=0.7486, over 12042.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7789, over 11883.70 frames. ], batch size: 101, lr: 7.44e-03 +2024-08-06 12:58:48,462 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:00:47,900 INFO [trainer.py:765] (5/8) Epoch 17, batch 100, train_loss[loss=2.79, ArTop10Accuracy=0.7739, over 14514.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7802, over 4746.89 frames. 
], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,302 INFO [trainer.py:765] (5/8) Epoch 17, batch 200, train_loss[loss=2.762, ArTop10Accuracy=0.7752, over 13503.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7818, over 7745.07 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,517 INFO [trainer.py:765] (5/8) Epoch 17, batch 300, train_loss[loss=2.704, ArTop10Accuracy=0.7863, over 14109.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7837, over 9363.13 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,760 INFO [trainer.py:765] (5/8) Epoch 17, batch 400, train_loss[loss=2.597, ArTop10Accuracy=0.8068, over 10308.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.784, over 10298.50 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,021 INFO [trainer.py:765] (5/8) Epoch 17, batch 500, train_loss[loss=2.65, ArTop10Accuracy=0.7994, over 12138.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7848, over 10855.95 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,878 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,688 INFO [trainer.py:765] (5/8) Epoch 17, batch 600, train_loss[loss=2.731, ArTop10Accuracy=0.7827, over 11508.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7836, over 11353.38 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,835 INFO [trainer.py:765] (5/8) Epoch 17, batch 700, train_loss[loss=2.661, ArTop10Accuracy=0.8012, over 10047.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7837, over 11499.97 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,480 INFO [trainer.py:765] (5/8) Epoch 17, batch 800, train_loss[loss=2.683, ArTop10Accuracy=0.7903, over 9426.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7833, over 11612.45 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 13:12:35,669 INFO [trainer.py:765] (5/8) Epoch 17, batch 900, train_loss[loss=2.679, ArTop10Accuracy=0.7909, over 13299.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.784, over 11667.58 frames. ], batch size: 28, lr: 7.06e-03 +2024-08-06 13:13:53,061 INFO [trainer.py:765] (5/8) Epoch 17, batch 1000, train_loss[loss=2.747, ArTop10Accuracy=0.7768, over 12849.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7829, over 11871.51 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,484 INFO [trainer.py:765] (5/8) Epoch 17, batch 1100, train_loss[loss=2.783, ArTop10Accuracy=0.7756, over 13917.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.781, over 11963.27 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,388 INFO [trainer.py:765] (5/8) Epoch 17, batch 1200, train_loss[loss=2.881, ArTop10Accuracy=0.7528, over 11922.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7812, over 11883.35 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:21,256 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:19:15,994 INFO [trainer.py:765] (5/8) Epoch 18, batch 100, train_loss[loss=2.836, ArTop10Accuracy=0.7618, over 14517.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7823, over 4741.07 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,600 INFO [trainer.py:765] (5/8) Epoch 18, batch 200, train_loss[loss=2.73, ArTop10Accuracy=0.7836, over 13572.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7844, over 7726.79 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,105 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 13:22:05,473 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,581 INFO [trainer.py:765] (5/8) Epoch 18, batch 300, train_loss[loss=2.857, ArTop10Accuracy=0.7546, over 14451.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7859, over 9365.24 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,930 INFO [trainer.py:765] (5/8) Epoch 18, batch 400, train_loss[loss=2.635, ArTop10Accuracy=0.7974, over 10905.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7862, over 10266.45 frames. ], batch size: 15, lr: 6.74e-03 +2024-08-06 13:25:34,013 INFO [trainer.py:765] (5/8) Epoch 18, batch 500, train_loss[loss=2.698, ArTop10Accuracy=0.7882, over 12288.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7875, over 10841.88 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,633 INFO [trainer.py:765] (5/8) Epoch 18, batch 600, train_loss[loss=2.6, ArTop10Accuracy=0.8053, over 11361.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.786, over 11357.44 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,583 INFO [trainer.py:765] (5/8) Epoch 18, batch 700, train_loss[loss=2.631, ArTop10Accuracy=0.7964, over 9192.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.785, over 11507.07 frames. ], batch size: 11, lr: 6.70e-03 +2024-08-06 13:29:54,984 INFO [trainer.py:765] (5/8) Epoch 18, batch 800, train_loss[loss=2.628, ArTop10Accuracy=0.8056, over 9312.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7848, over 11606.69 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 13:31:12,519 INFO [trainer.py:765] (5/8) Epoch 18, batch 900, train_loss[loss=2.791, ArTop10Accuracy=0.7691, over 12933.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7852, over 11666.15 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,551 INFO [trainer.py:765] (5/8) Epoch 18, batch 1000, train_loss[loss=2.696, ArTop10Accuracy=0.7862, over 12858.00 frames. ], tot_loss[loss=2.729, ArTop10Accuracy=0.7835, over 11872.20 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,497 INFO [trainer.py:765] (5/8) Epoch 18, batch 1100, train_loss[loss=2.694, ArTop10Accuracy=0.788, over 13674.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7822, over 11940.32 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,674 INFO [trainer.py:765] (5/8) Epoch 18, batch 1200, train_loss[loss=2.855, ArTop10Accuracy=0.7583, over 12042.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.782, over 11858.01 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,064 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,247 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:37:48,624 INFO [trainer.py:765] (5/8) Epoch 19, batch 100, train_loss[loss=2.784, ArTop10Accuracy=0.7731, over 14520.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7851, over 4772.92 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,257 INFO [trainer.py:765] (5/8) Epoch 19, batch 200, train_loss[loss=2.721, ArTop10Accuracy=0.7922, over 13578.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7863, over 7762.37 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,360 INFO [trainer.py:765] (5/8) Epoch 19, batch 300, train_loss[loss=2.743, ArTop10Accuracy=0.7811, over 14157.00 frames. ], tot_loss[loss=2.712, ArTop10Accuracy=0.7867, over 9395.02 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 13:42:21,067 INFO [trainer.py:765] (5/8) Epoch 19, batch 400, train_loss[loss=2.584, ArTop10Accuracy=0.8123, over 10896.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7878, over 10297.34 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 13:43:44,955 INFO [trainer.py:765] (5/8) Epoch 19, batch 500, train_loss[loss=2.74, ArTop10Accuracy=0.7834, over 12279.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7886, over 10864.55 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,682 INFO [trainer.py:765] (5/8) Epoch 19, batch 600, train_loss[loss=2.702, ArTop10Accuracy=0.7867, over 11439.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.788, over 11385.29 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,324 INFO [trainer.py:765] (5/8) Epoch 19, batch 700, train_loss[loss=2.638, ArTop10Accuracy=0.8023, over 10281.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7873, over 11528.57 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,884 INFO [trainer.py:765] (5/8) Epoch 19, batch 800, train_loss[loss=2.69, ArTop10Accuracy=0.7886, over 9447.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.786, over 11643.01 frames. ], batch size: 11, lr: 6.34e-03 +2024-08-06 13:49:27,259 INFO [trainer.py:765] (5/8) Epoch 19, batch 900, train_loss[loss=2.693, ArTop10Accuracy=0.7895, over 12918.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7871, over 11699.57 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,655 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:50:50,537 INFO [trainer.py:811] (5/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,537 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33004MB +2024-08-06 13:50:51,490 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,916 INFO [trainer.py:765] (5/8) Epoch 19, batch 1000, train_loss[loss=2.719, ArTop10Accuracy=0.7876, over 12822.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7857, over 11884.36 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,266 INFO [trainer.py:765] (5/8) Epoch 19, batch 1100, train_loss[loss=2.753, ArTop10Accuracy=0.7806, over 13827.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11932.60 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,314 INFO [trainer.py:765] (5/8) Epoch 19, batch 1200, train_loss[loss=2.875, ArTop10Accuracy=0.754, over 12696.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7839, over 11864.89 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:21,608 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:56:12,905 INFO [trainer.py:765] (5/8) Epoch 20, batch 100, train_loss[loss=2.771, ArTop10Accuracy=0.774, over 14373.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7863, over 4760.92 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,495 INFO [trainer.py:765] (5/8) Epoch 20, batch 200, train_loss[loss=2.687, ArTop10Accuracy=0.7925, over 13419.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.788, over 7758.04 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,430 INFO [trainer.py:765] (5/8) Epoch 20, batch 300, train_loss[loss=2.741, ArTop10Accuracy=0.7801, over 13872.00 frames. ], tot_loss[loss=2.698, ArTop10Accuracy=0.7891, over 9376.52 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,357 INFO [trainer.py:765] (5/8) Epoch 20, batch 400, train_loss[loss=2.606, ArTop10Accuracy=0.8038, over 10281.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7882, over 10286.27 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,855 INFO [trainer.py:765] (5/8) Epoch 20, batch 500, train_loss[loss=2.693, ArTop10Accuracy=0.7924, over 12243.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.7887, over 10847.60 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,856 INFO [trainer.py:765] (5/8) Epoch 20, batch 600, train_loss[loss=2.575, ArTop10Accuracy=0.8142, over 11583.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7885, over 11371.01 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,864 INFO [trainer.py:765] (5/8) Epoch 20, batch 700, train_loss[loss=2.636, ArTop10Accuracy=0.8101, over 10137.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.788, over 11525.42 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 14:05:30,791 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,509 INFO [trainer.py:765] (5/8) Epoch 20, batch 800, train_loss[loss=2.63, ArTop10Accuracy=0.805, over 9363.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7871, over 11647.49 frames. ], batch size: 11, lr: 6.02e-03 +2024-08-06 14:07:50,944 INFO [trainer.py:765] (5/8) Epoch 20, batch 900, train_loss[loss=2.69, ArTop10Accuracy=0.7948, over 12888.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7881, over 11693.94 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,173 INFO [trainer.py:765] (5/8) Epoch 20, batch 1000, train_loss[loss=2.754, ArTop10Accuracy=0.7762, over 12837.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7873, over 11889.47 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,209 INFO [trainer.py:765] (5/8) Epoch 20, batch 1100, train_loss[loss=2.775, ArTop10Accuracy=0.776, over 13869.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7859, over 11949.23 frames. ], batch size: 35, lr: 5.99e-03 +2024-08-06 14:11:37,813 INFO [trainer.py:765] (5/8) Epoch 20, batch 1200, train_loss[loss=2.857, ArTop10Accuracy=0.7624, over 11853.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7862, over 11859.92 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:37,384 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 14:12:37,386 INFO [trainer.py:1069] (5/8) Done! 
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-6 b/libritts-r/log/log-train-2024-08-06-08-06-14-6 new file mode 100644 index 0000000000000000000000000000000000000000..ccd7d8bdd9e10e26f186f9eaaad914543d81235d --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-6 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,313 INFO [trainer.py:870] (6/8) Training started +2024-08-06 08:06:14,315 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 08:06:14,315 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,315 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 08:06:15,003 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,221 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 08:06:19,152 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 08:06:19,154 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 08:06:19,155 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 08:06:19,155 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 08:06:19,155 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,762 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 08:06:19,762 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 08:06:20,082 INFO [datamodule.py:388] (6/8) About to create dev dataloader +2024-08-06 
08:08:02,121 INFO [trainer.py:765] (6/8) Epoch 1, batch 100, train_loss[loss=4.388, ArTop10Accuracy=0.4801, over 14610.00 frames. ], tot_loss[loss=5.055, ArTop10Accuracy=0.3726, over 4770.95 frames. ], batch size: 63, lr: 2.25e-02 +2024-08-06 08:09:28,827 INFO [trainer.py:765] (6/8) Epoch 1, batch 200, train_loss[loss=3.986, ArTop10Accuracy=0.5533, over 13587.00 frames. ], tot_loss[loss=4.49, ArTop10Accuracy=0.4676, over 7758.92 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,429 INFO [trainer.py:765] (6/8) Epoch 1, batch 300, train_loss[loss=3.87, ArTop10Accuracy=0.5696, over 14733.00 frames. ], tot_loss[loss=4.219, ArTop10Accuracy=0.5124, over 9377.57 frames. ], batch size: 45, lr: 3.00e-02 +2024-08-06 08:12:12,699 INFO [trainer.py:765] (6/8) Epoch 1, batch 400, train_loss[loss=3.622, ArTop10Accuracy=0.6205, over 10365.00 frames. ], tot_loss[loss=4.032, ArTop10Accuracy=0.5444, over 10277.76 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 08:13:40,049 INFO [trainer.py:765] (6/8) Epoch 1, batch 500, train_loss[loss=3.739, ArTop10Accuracy=0.5912, over 12429.00 frames. ], tot_loss[loss=3.887, ArTop10Accuracy=0.5694, over 10839.94 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,242 INFO [trainer.py:765] (6/8) Epoch 1, batch 600, train_loss[loss=3.558, ArTop10Accuracy=0.6331, over 11361.00 frames. ], tot_loss[loss=3.772, ArTop10Accuracy=0.5898, over 11361.50 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,423 INFO [trainer.py:765] (6/8) Epoch 1, batch 700, train_loss[loss=3.451, ArTop10Accuracy=0.6472, over 10158.00 frames. ], tot_loss[loss=3.689, ArTop10Accuracy=0.6045, over 11517.12 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,017 INFO [trainer.py:765] (6/8) Epoch 1, batch 800, train_loss[loss=3.352, ArTop10Accuracy=0.6725, over 9489.00 frames. ], tot_loss[loss=3.627, ArTop10Accuracy=0.6162, over 11639.17 frames. ], batch size: 11, lr: 2.98e-02 +2024-08-06 08:18:56,150 INFO [trainer.py:765] (6/8) Epoch 1, batch 900, train_loss[loss=3.437, ArTop10Accuracy=0.6495, over 12990.00 frames. ], tot_loss[loss=3.567, ArTop10Accuracy=0.6269, over 11692.94 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,862 INFO [trainer.py:765] (6/8) Epoch 1, batch 1000, train_loss[loss=3.39, ArTop10Accuracy=0.6602, over 12999.00 frames. ], tot_loss[loss=3.525, ArTop10Accuracy=0.6346, over 11885.92 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 08:20:13,538 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,154 INFO [trainer.py:765] (6/8) Epoch 1, batch 1100, train_loss[loss=3.509, ArTop10Accuracy=0.6415, over 13734.00 frames. ], tot_loss[loss=3.494, ArTop10Accuracy=0.6401, over 11960.16 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 08:22:45,411 INFO [trainer.py:765] (6/8) Epoch 1, batch 1200, train_loss[loss=3.423, ArTop10Accuracy=0.6589, over 11565.00 frames. ], tot_loss[loss=3.464, ArTop10Accuracy=0.6459, over 11864.25 frames. ], batch size: 101, lr: 2.96e-02 +2024-08-06 08:23:45,150 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:25:36,237 INFO [trainer.py:765] (6/8) Epoch 2, batch 100, train_loss[loss=3.402, ArTop10Accuracy=0.6551, over 14169.00 frames. ], tot_loss[loss=3.421, ArTop10Accuracy=0.6534, over 4752.27 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,956 INFO [trainer.py:765] (6/8) Epoch 2, batch 200, train_loss[loss=3.363, ArTop10Accuracy=0.6631, over 13458.00 frames. ], tot_loss[loss=3.391, ArTop10Accuracy=0.6587, over 7750.75 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,532 INFO [trainer.py:765] (6/8) Epoch 2, batch 300, train_loss[loss=3.304, ArTop10Accuracy=0.6729, over 14391.00 frames. ], tot_loss[loss=3.375, ArTop10Accuracy=0.6616, over 9369.14 frames. ], batch size: 45, lr: 2.89e-02 +2024-08-06 08:29:48,636 INFO [trainer.py:765] (6/8) Epoch 2, batch 400, train_loss[loss=3.287, ArTop10Accuracy=0.6818, over 10422.00 frames. ], tot_loss[loss=3.355, ArTop10Accuracy=0.6658, over 10264.80 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 08:31:22,901 INFO [trainer.py:765] (6/8) Epoch 2, batch 500, train_loss[loss=3.302, ArTop10Accuracy=0.6806, over 12180.00 frames. ], tot_loss[loss=3.337, ArTop10Accuracy=0.6695, over 10824.54 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 08:32:45,686 INFO [trainer.py:765] (6/8) Epoch 2, batch 600, train_loss[loss=3.304, ArTop10Accuracy=0.6782, over 11436.00 frames. ], tot_loss[loss=3.329, ArTop10Accuracy=0.6711, over 11355.80 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,582 INFO [trainer.py:765] (6/8) Epoch 2, batch 700, train_loss[loss=3.079, ArTop10Accuracy=0.7212, over 9345.00 frames. ], tot_loss[loss=3.324, ArTop10Accuracy=0.6721, over 11500.32 frames. ], batch size: 11, lr: 2.85e-02 +2024-08-06 08:34:31,174 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:34:40,888 INFO [trainer.py:811] (6/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,889 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29113MB +2024-08-06 08:34:41,699 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,876 INFO [trainer.py:765] (6/8) Epoch 2, batch 800, train_loss[loss=3.286, ArTop10Accuracy=0.6826, over 10155.00 frames. ], tot_loss[loss=3.321, ArTop10Accuracy=0.6728, over 11631.80 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 08:36:56,370 INFO [trainer.py:765] (6/8) Epoch 2, batch 900, train_loss[loss=3.287, ArTop10Accuracy=0.6823, over 13350.00 frames. ], tot_loss[loss=3.309, ArTop10Accuracy=0.6751, over 11666.99 frames. ], batch size: 28, lr: 2.83e-02 +2024-08-06 08:38:10,510 INFO [trainer.py:765] (6/8) Epoch 2, batch 1000, train_loss[loss=3.288, ArTop10Accuracy=0.6815, over 13026.00 frames. ], tot_loss[loss=3.3, ArTop10Accuracy=0.6765, over 11879.59 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,058 INFO [trainer.py:765] (6/8) Epoch 2, batch 1100, train_loss[loss=3.25, ArTop10Accuracy=0.6835, over 13587.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6776, over 11944.38 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,219 INFO [trainer.py:765] (6/8) Epoch 2, batch 1200, train_loss[loss=3.286, ArTop10Accuracy=0.6777, over 11184.00 frames. ], tot_loss[loss=3.286, ArTop10Accuracy=0.6793, over 11847.29 frames. ], batch size: 101, lr: 2.80e-02 +2024-08-06 08:41:38,257 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:43:36,648 INFO [trainer.py:765] (6/8) Epoch 3, batch 100, train_loss[loss=3.289, ArTop10Accuracy=0.6722, over 14634.00 frames. ], tot_loss[loss=3.251, ArTop10Accuracy=0.6851, over 4755.45 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,500 INFO [trainer.py:765] (6/8) Epoch 3, batch 200, train_loss[loss=3.181, ArTop10Accuracy=0.7005, over 13515.00 frames. ], tot_loss[loss=3.221, ArTop10Accuracy=0.6909, over 7745.42 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,257 INFO [trainer.py:765] (6/8) Epoch 3, batch 300, train_loss[loss=3.261, ArTop10Accuracy=0.6856, over 14103.00 frames. ], tot_loss[loss=3.206, ArTop10Accuracy=0.6938, over 9364.34 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 08:48:04,218 INFO [trainer.py:765] (6/8) Epoch 3, batch 400, train_loss[loss=3.105, ArTop10Accuracy=0.7125, over 10353.00 frames. ], tot_loss[loss=3.191, ArTop10Accuracy=0.6967, over 10275.21 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 08:48:40,881 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,541 INFO [trainer.py:765] (6/8) Epoch 3, batch 500, train_loss[loss=3.089, ArTop10Accuracy=0.7152, over 12294.00 frames. ], tot_loss[loss=3.171, ArTop10Accuracy=0.7005, over 10826.07 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,476 INFO [trainer.py:765] (6/8) Epoch 3, batch 600, train_loss[loss=3.138, ArTop10Accuracy=0.7107, over 11346.00 frames. ], tot_loss[loss=3.155, ArTop10Accuracy=0.7036, over 11363.23 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,618 INFO [trainer.py:765] (6/8) Epoch 3, batch 700, train_loss[loss=3.117, ArTop10Accuracy=0.7064, over 9513.00 frames. ], tot_loss[loss=3.144, ArTop10Accuracy=0.7058, over 11502.94 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 08:53:57,388 INFO [trainer.py:765] (6/8) Epoch 3, batch 800, train_loss[loss=3.005, ArTop10Accuracy=0.7328, over 10665.00 frames. ], tot_loss[loss=3.136, ArTop10Accuracy=0.7073, over 11636.91 frames. ], batch size: 13, lr: 2.59e-02 +2024-08-06 08:55:15,117 INFO [trainer.py:765] (6/8) Epoch 3, batch 900, train_loss[loss=3.093, ArTop10Accuracy=0.7182, over 13086.00 frames. ], tot_loss[loss=3.119, ArTop10Accuracy=0.7107, over 11682.79 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,557 INFO [trainer.py:765] (6/8) Epoch 3, batch 1000, train_loss[loss=2.997, ArTop10Accuracy=0.7294, over 12915.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.7126, over 11878.15 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 08:57:46,506 INFO [trainer.py:765] (6/8) Epoch 3, batch 1100, train_loss[loss=3.086, ArTop10Accuracy=0.7154, over 13635.00 frames. ], tot_loss[loss=3.102, ArTop10Accuracy=0.7138, over 11951.69 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,399 INFO [trainer.py:765] (6/8) Epoch 3, batch 1200, train_loss[loss=3.159, ArTop10Accuracy=0.7025, over 11697.00 frames. ], tot_loss[loss=3.094, ArTop10Accuracy=0.7154, over 11858.17 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:02,076 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:01:50,740 INFO [trainer.py:765] (6/8) Epoch 4, batch 100, train_loss[loss=3.078, ArTop10Accuracy=0.7206, over 14586.00 frames. ], tot_loss[loss=3.077, ArTop10Accuracy=0.7175, over 4762.22 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,858 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,385 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29374MB +2024-08-06 09:03:03,364 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,274 INFO [trainer.py:765] (6/8) Epoch 4, batch 200, train_loss[loss=3.044, ArTop10Accuracy=0.7246, over 13827.00 frames. ], tot_loss[loss=3.049, ArTop10Accuracy=0.7232, over 7750.55 frames. ], batch size: 35, lr: 2.37e-02 +2024-08-06 09:05:01,732 INFO [trainer.py:765] (6/8) Epoch 4, batch 300, train_loss[loss=3.137, ArTop10Accuracy=0.7043, over 14121.00 frames. ], tot_loss[loss=3.041, ArTop10Accuracy=0.7249, over 9386.87 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 09:06:28,150 INFO [trainer.py:765] (6/8) Epoch 4, batch 400, train_loss[loss=3.036, ArTop10Accuracy=0.7249, over 10518.00 frames. ], tot_loss[loss=3.037, ArTop10Accuracy=0.7257, over 10309.40 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 09:08:01,925 INFO [trainer.py:765] (6/8) Epoch 4, batch 500, train_loss[loss=2.881, ArTop10Accuracy=0.7551, over 12159.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7275, over 10832.97 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,540 INFO [trainer.py:765] (6/8) Epoch 4, batch 600, train_loss[loss=2.984, ArTop10Accuracy=0.7324, over 11529.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.728, over 11364.42 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,865 INFO [trainer.py:765] (6/8) Epoch 4, batch 700, train_loss[loss=2.895, ArTop10Accuracy=0.7564, over 10038.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7284, over 11504.85 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 09:12:17,513 INFO [trainer.py:765] (6/8) Epoch 4, batch 800, train_loss[loss=2.829, ArTop10Accuracy=0.7753, over 10317.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.729, over 11618.34 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 09:13:33,212 INFO [trainer.py:765] (6/8) Epoch 4, batch 900, train_loss[loss=2.953, ArTop10Accuracy=0.7436, over 13008.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7307, over 11683.53 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 09:14:47,520 INFO [trainer.py:765] (6/8) Epoch 4, batch 1000, train_loss[loss=3.061, ArTop10Accuracy=0.7249, over 12897.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7306, over 11877.18 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 09:16:02,982 INFO [trainer.py:765] (6/8) Epoch 4, batch 1100, train_loss[loss=3.017, ArTop10Accuracy=0.7294, over 13554.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7302, over 11959.80 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 09:16:53,291 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,344 INFO [trainer.py:765] (6/8) Epoch 4, batch 1200, train_loss[loss=3.105, ArTop10Accuracy=0.712, over 12048.00 frames. ], tot_loss[loss=3.01, ArTop10Accuracy=0.7309, over 11874.78 frames. ], batch size: 103, lr: 2.25e-02 +2024-08-06 09:18:17,022 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:20:17,171 INFO [trainer.py:765] (6/8) Epoch 5, batch 100, train_loss[loss=3.024, ArTop10Accuracy=0.729, over 14520.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7323, over 4763.86 frames. 
], batch size: 62, lr: 2.10e-02 +2024-08-06 09:21:52,296 INFO [trainer.py:765] (6/8) Epoch 5, batch 200, train_loss[loss=2.932, ArTop10Accuracy=0.7488, over 13890.00 frames. ], tot_loss[loss=2.978, ArTop10Accuracy=0.7364, over 7748.36 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,241 INFO [trainer.py:765] (6/8) Epoch 5, batch 300, train_loss[loss=2.922, ArTop10Accuracy=0.7481, over 14118.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7383, over 9371.93 frames. ], batch size: 45, lr: 2.08e-02 +2024-08-06 09:24:53,537 INFO [trainer.py:765] (6/8) Epoch 5, batch 400, train_loss[loss=2.855, ArTop10Accuracy=0.7614, over 10344.00 frames. ], tot_loss[loss=2.97, ArTop10Accuracy=0.7382, over 10282.75 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,418 INFO [trainer.py:765] (6/8) Epoch 5, batch 500, train_loss[loss=2.962, ArTop10Accuracy=0.7411, over 12372.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.739, over 10822.00 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,538 INFO [trainer.py:765] (6/8) Epoch 5, batch 600, train_loss[loss=2.859, ArTop10Accuracy=0.7623, over 11424.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7396, over 11344.24 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,670 INFO [trainer.py:765] (6/8) Epoch 5, batch 700, train_loss[loss=2.829, ArTop10Accuracy=0.766, over 10110.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7394, over 11498.36 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 09:30:44,694 INFO [trainer.py:765] (6/8) Epoch 5, batch 800, train_loss[loss=2.993, ArTop10Accuracy=0.7335, over 10182.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7386, over 11628.28 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 09:31:51,240 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:32:00,762 INFO [trainer.py:811] (6/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,763 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29654MB +2024-08-06 09:32:01,709 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,554 INFO [trainer.py:765] (6/8) Epoch 5, batch 900, train_loss[loss=2.987, ArTop10Accuracy=0.7291, over 13041.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7401, over 11691.73 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,323 INFO [trainer.py:765] (6/8) Epoch 5, batch 1000, train_loss[loss=2.989, ArTop10Accuracy=0.7325, over 13020.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.74, over 11890.41 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,300 INFO [trainer.py:765] (6/8) Epoch 5, batch 1100, train_loss[loss=2.954, ArTop10Accuracy=0.7405, over 13821.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7393, over 11936.71 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,331 INFO [trainer.py:765] (6/8) Epoch 5, batch 1200, train_loss[loss=3.119, ArTop10Accuracy=0.7086, over 12636.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7402, over 11856.38 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,716 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:38:52,665 INFO [trainer.py:765] (6/8) Epoch 6, batch 100, train_loss[loss=2.955, ArTop10Accuracy=0.7445, over 14772.00 frames. ], tot_loss[loss=2.951, ArTop10Accuracy=0.7416, over 4751.62 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,834 INFO [trainer.py:765] (6/8) Epoch 6, batch 200, train_loss[loss=2.913, ArTop10Accuracy=0.7489, over 13596.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.7445, over 7733.30 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,965 INFO [trainer.py:765] (6/8) Epoch 6, batch 300, train_loss[loss=2.968, ArTop10Accuracy=0.7397, over 13965.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.7459, over 9366.45 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 09:43:17,828 INFO [trainer.py:765] (6/8) Epoch 6, batch 400, train_loss[loss=2.856, ArTop10Accuracy=0.7616, over 10935.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7471, over 10286.23 frames. ], batch size: 15, lr: 1.83e-02 +2024-08-06 09:44:54,128 INFO [trainer.py:765] (6/8) Epoch 6, batch 500, train_loss[loss=2.918, ArTop10Accuracy=0.7506, over 12210.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7475, over 10852.19 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,873 INFO [trainer.py:765] (6/8) Epoch 6, batch 600, train_loss[loss=2.782, ArTop10Accuracy=0.7736, over 11358.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7481, over 11364.24 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,219 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,870 INFO [trainer.py:765] (6/8) Epoch 6, batch 700, train_loss[loss=2.855, ArTop10Accuracy=0.7598, over 10134.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7472, over 11502.69 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 09:49:15,955 INFO [trainer.py:765] (6/8) Epoch 6, batch 800, train_loss[loss=2.854, ArTop10Accuracy=0.759, over 10095.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7469, over 11620.14 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,135 INFO [trainer.py:765] (6/8) Epoch 6, batch 900, train_loss[loss=2.932, ArTop10Accuracy=0.7518, over 13065.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7477, over 11670.94 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,299 INFO [trainer.py:765] (6/8) Epoch 6, batch 1000, train_loss[loss=2.987, ArTop10Accuracy=0.735, over 12957.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7468, over 11889.76 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,921 INFO [trainer.py:765] (6/8) Epoch 6, batch 1100, train_loss[loss=2.839, ArTop10Accuracy=0.7588, over 13614.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7459, over 11947.39 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 09:54:14,337 INFO [trainer.py:765] (6/8) Epoch 6, batch 1200, train_loss[loss=3.013, ArTop10Accuracy=0.7295, over 12051.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.746, over 11869.99 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,008 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:57:06,699 INFO [trainer.py:765] (6/8) Epoch 7, batch 100, train_loss[loss=2.878, ArTop10Accuracy=0.7565, over 14559.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7495, over 4765.49 frames. ], batch size: 63, lr: 1.64e-02 +2024-08-06 09:58:39,426 INFO [trainer.py:765] (6/8) Epoch 7, batch 200, train_loss[loss=2.901, ArTop10Accuracy=0.7472, over 13467.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7514, over 7756.69 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 10:00:06,083 INFO [trainer.py:765] (6/8) Epoch 7, batch 300, train_loss[loss=2.904, ArTop10Accuracy=0.7493, over 14169.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7525, over 9366.34 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,509 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29654MB +2024-08-06 10:00:50,976 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,115 INFO [trainer.py:765] (6/8) Epoch 7, batch 400, train_loss[loss=2.873, ArTop10Accuracy=0.7615, over 10770.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7531, over 10290.52 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 10:03:21,455 INFO [trainer.py:765] (6/8) Epoch 7, batch 500, train_loss[loss=2.785, ArTop10Accuracy=0.779, over 12291.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7536, over 10870.88 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,881 INFO [trainer.py:765] (6/8) Epoch 7, batch 600, train_loss[loss=2.849, ArTop10Accuracy=0.7606, over 11370.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.753, over 11381.86 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 10:06:25,112 INFO [trainer.py:765] (6/8) Epoch 7, batch 700, train_loss[loss=2.794, ArTop10Accuracy=0.7743, over 10017.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7523, over 11532.56 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,948 INFO [trainer.py:765] (6/8) Epoch 7, batch 800, train_loss[loss=2.775, ArTop10Accuracy=0.7791, over 10104.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7525, over 11648.59 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,822 INFO [trainer.py:765] (6/8) Epoch 7, batch 900, train_loss[loss=2.816, ArTop10Accuracy=0.7693, over 13011.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7538, over 11702.50 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,636 INFO [trainer.py:765] (6/8) Epoch 7, batch 1000, train_loss[loss=2.9, ArTop10Accuracy=0.7526, over 12774.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7523, over 11906.77 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,208 INFO [trainer.py:765] (6/8) Epoch 7, batch 1100, train_loss[loss=2.894, ArTop10Accuracy=0.7517, over 13650.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7512, over 11965.81 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,204 INFO [trainer.py:765] (6/8) Epoch 7, batch 1200, train_loss[loss=3.006, ArTop10Accuracy=0.7278, over 12288.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.752, over 11848.72 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,782 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:15:03,600 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,820 INFO [trainer.py:765] (6/8) Epoch 8, batch 100, train_loss[loss=2.916, ArTop10Accuracy=0.7516, over 14502.00 frames. ], tot_loss[loss=2.882, ArTop10Accuracy=0.7548, over 4756.16 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,861 INFO [trainer.py:765] (6/8) Epoch 8, batch 200, train_loss[loss=2.881, ArTop10Accuracy=0.7545, over 13539.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7563, over 7759.48 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,898 INFO [trainer.py:765] (6/8) Epoch 8, batch 300, train_loss[loss=2.96, ArTop10Accuracy=0.7408, over 13821.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7571, over 9369.67 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,341 INFO [trainer.py:765] (6/8) Epoch 8, batch 400, train_loss[loss=2.908, ArTop10Accuracy=0.7466, over 10806.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7576, over 10268.57 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 10:21:32,411 INFO [trainer.py:765] (6/8) Epoch 8, batch 500, train_loss[loss=2.894, ArTop10Accuracy=0.7548, over 12519.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7589, over 10821.20 frames. ], batch size: 23, lr: 1.45e-02 +2024-08-06 10:23:00,974 INFO [trainer.py:765] (6/8) Epoch 8, batch 600, train_loss[loss=2.724, ArTop10Accuracy=0.786, over 11475.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7585, over 11354.19 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,788 INFO [trainer.py:765] (6/8) Epoch 8, batch 700, train_loss[loss=2.836, ArTop10Accuracy=0.7659, over 10110.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7572, over 11488.52 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:25:56,085 INFO [trainer.py:765] (6/8) Epoch 8, batch 800, train_loss[loss=2.815, ArTop10Accuracy=0.7729, over 10086.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7568, over 11614.89 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:27:12,245 INFO [trainer.py:765] (6/8) Epoch 8, batch 900, train_loss[loss=2.778, ArTop10Accuracy=0.7739, over 13353.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.758, over 11678.16 frames. ], batch size: 28, lr: 1.42e-02 +2024-08-06 10:28:25,263 INFO [trainer.py:765] (6/8) Epoch 8, batch 1000, train_loss[loss=2.856, ArTop10Accuracy=0.7623, over 12663.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7573, over 11880.24 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,155 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:29:16,831 INFO [trainer.py:811] (6/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29654MB +2024-08-06 10:29:17,491 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,731 INFO [trainer.py:765] (6/8) Epoch 8, batch 1100, train_loss[loss=2.932, ArTop10Accuracy=0.7465, over 13584.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7559, over 11946.18 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,946 INFO [trainer.py:765] (6/8) Epoch 8, batch 1200, train_loss[loss=2.927, ArTop10Accuracy=0.7476, over 12045.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7557, over 11857.21 frames. ], batch size: 101, lr: 1.40e-02 +2024-08-06 10:32:05,758 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:34:01,256 INFO [trainer.py:765] (6/8) Epoch 9, batch 100, train_loss[loss=2.9, ArTop10Accuracy=0.7515, over 14604.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7584, over 4783.71 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,773 INFO [trainer.py:765] (6/8) Epoch 9, batch 200, train_loss[loss=2.829, ArTop10Accuracy=0.7662, over 13380.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7607, over 7756.80 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,928 INFO [trainer.py:765] (6/8) Epoch 9, batch 300, train_loss[loss=2.847, ArTop10Accuracy=0.7628, over 14202.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7615, over 9387.52 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,697 INFO [trainer.py:765] (6/8) Epoch 9, batch 400, train_loss[loss=2.716, ArTop10Accuracy=0.7936, over 10851.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7619, over 10294.73 frames. ], batch size: 15, lr: 1.31e-02 +2024-08-06 10:39:59,256 INFO [trainer.py:765] (6/8) Epoch 9, batch 500, train_loss[loss=2.8, ArTop10Accuracy=0.7729, over 12210.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7626, over 10867.43 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,691 INFO [trainer.py:765] (6/8) Epoch 9, batch 600, train_loss[loss=2.781, ArTop10Accuracy=0.7675, over 11394.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7622, over 11375.14 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,441 INFO [trainer.py:765] (6/8) Epoch 9, batch 700, train_loss[loss=2.918, ArTop10Accuracy=0.7417, over 9318.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7614, over 11500.36 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 10:44:02,952 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,670 INFO [trainer.py:765] (6/8) Epoch 9, batch 800, train_loss[loss=2.715, ArTop10Accuracy=0.7933, over 10152.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7614, over 11616.55 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:45:35,720 INFO [trainer.py:765] (6/8) Epoch 9, batch 900, train_loss[loss=2.749, ArTop10Accuracy=0.7809, over 13059.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7623, over 11671.85 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:46:51,272 INFO [trainer.py:765] (6/8) Epoch 9, batch 1000, train_loss[loss=2.793, ArTop10Accuracy=0.7717, over 13047.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7618, over 11855.39 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,247 INFO [trainer.py:765] (6/8) Epoch 9, batch 1100, train_loss[loss=2.91, ArTop10Accuracy=0.7485, over 13737.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7603, over 11939.27 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,054 INFO [trainer.py:765] (6/8) Epoch 9, batch 1200, train_loss[loss=2.971, ArTop10Accuracy=0.7364, over 11847.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7598, over 11871.90 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:22,739 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:52:12,325 INFO [trainer.py:765] (6/8) Epoch 10, batch 100, train_loss[loss=2.875, ArTop10Accuracy=0.7539, over 14637.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.762, over 4761.86 frames. ], batch size: 63, lr: 1.20e-02 +2024-08-06 10:53:44,585 INFO [trainer.py:765] (6/8) Epoch 10, batch 200, train_loss[loss=2.909, ArTop10Accuracy=0.7444, over 13647.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7635, over 7759.99 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,089 INFO [trainer.py:765] (6/8) Epoch 10, batch 300, train_loss[loss=2.861, ArTop10Accuracy=0.761, over 14100.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.765, over 9360.91 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,175 INFO [trainer.py:765] (6/8) Epoch 10, batch 400, train_loss[loss=2.692, ArTop10Accuracy=0.7867, over 10296.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.766, over 10289.78 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 10:58:04,937 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:58:14,559 INFO [trainer.py:811] (6/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,560 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29654MB +2024-08-06 10:58:15,572 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,576 INFO [trainer.py:765] (6/8) Epoch 10, batch 500, train_loss[loss=2.872, ArTop10Accuracy=0.7575, over 12288.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7662, over 10829.88 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 10:59:42,814 INFO [trainer.py:765] (6/8) Epoch 10, batch 600, train_loss[loss=2.743, ArTop10Accuracy=0.776, over 11544.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7657, over 11345.47 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,107 INFO [trainer.py:765] (6/8) Epoch 10, batch 700, train_loss[loss=2.692, ArTop10Accuracy=0.7882, over 10173.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.765, over 11518.39 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 11:02:36,917 INFO [trainer.py:765] (6/8) Epoch 10, batch 800, train_loss[loss=2.795, ArTop10Accuracy=0.7671, over 9525.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7647, over 11631.64 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 11:03:51,211 INFO [trainer.py:765] (6/8) Epoch 10, batch 900, train_loss[loss=2.774, ArTop10Accuracy=0.7739, over 12909.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7651, over 11689.97 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,351 INFO [trainer.py:765] (6/8) Epoch 10, batch 1000, train_loss[loss=2.859, ArTop10Accuracy=0.7609, over 12915.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7639, over 11887.16 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:06:21,722 INFO [trainer.py:765] (6/8) Epoch 10, batch 1100, train_loss[loss=2.86, ArTop10Accuracy=0.7558, over 13509.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7633, over 11950.67 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,772 INFO [trainer.py:765] (6/8) Epoch 10, batch 1200, train_loss[loss=2.952, ArTop10Accuracy=0.7372, over 12240.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7625, over 11878.92 frames. ], batch size: 102, lr: 1.16e-02 +2024-08-06 11:08:33,387 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:10:29,954 INFO [trainer.py:765] (6/8) Epoch 11, batch 100, train_loss[loss=2.866, ArTop10Accuracy=0.7539, over 14262.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7664, over 4767.89 frames. ], batch size: 62, lr: 1.10e-02 +2024-08-06 11:12:04,673 INFO [trainer.py:765] (6/8) Epoch 11, batch 200, train_loss[loss=2.809, ArTop10Accuracy=0.7704, over 13536.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7679, over 7753.99 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 11:12:22,825 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,547 INFO [trainer.py:765] (6/8) Epoch 11, batch 300, train_loss[loss=2.827, ArTop10Accuracy=0.7662, over 13899.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7685, over 9366.37 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,268 INFO [trainer.py:765] (6/8) Epoch 11, batch 400, train_loss[loss=2.802, ArTop10Accuracy=0.7696, over 10353.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7689, over 10271.71 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,636 INFO [trainer.py:765] (6/8) Epoch 11, batch 500, train_loss[loss=2.814, ArTop10Accuracy=0.7663, over 12087.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7703, over 10832.24 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,516 INFO [trainer.py:765] (6/8) Epoch 11, batch 600, train_loss[loss=2.82, ArTop10Accuracy=0.7657, over 11532.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7695, over 11361.59 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,513 INFO [trainer.py:765] (6/8) Epoch 11, batch 700, train_loss[loss=2.577, ArTop10Accuracy=0.8116, over 10242.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7693, over 11536.07 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 11:20:55,482 INFO [trainer.py:765] (6/8) Epoch 11, batch 800, train_loss[loss=2.719, ArTop10Accuracy=0.7877, over 9489.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7682, over 11637.50 frames. ], batch size: 11, lr: 1.07e-02 +2024-08-06 11:22:13,704 INFO [trainer.py:765] (6/8) Epoch 11, batch 900, train_loss[loss=2.831, ArTop10Accuracy=0.7682, over 12921.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7688, over 11674.79 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,798 INFO [trainer.py:765] (6/8) Epoch 11, batch 1000, train_loss[loss=2.826, ArTop10Accuracy=0.7626, over 12708.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7673, over 11858.76 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,901 INFO [trainer.py:765] (6/8) Epoch 11, batch 1100, train_loss[loss=2.864, ArTop10Accuracy=0.759, over 13605.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7654, over 11944.73 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,732 INFO [trainer.py:765] (6/8) Epoch 11, batch 1200, train_loss[loss=2.94, ArTop10Accuracy=0.7391, over 12612.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7655, over 11881.65 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,846 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,557 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30358MB +2024-08-06 11:26:26,184 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,581 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:29:03,451 INFO [trainer.py:765] (6/8) Epoch 12, batch 100, train_loss[loss=2.865, ArTop10Accuracy=0.7524, over 14409.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7679, over 4752.94 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,674 INFO [trainer.py:765] (6/8) Epoch 12, batch 200, train_loss[loss=2.826, ArTop10Accuracy=0.7657, over 13659.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7698, over 7751.77 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,655 INFO [trainer.py:765] (6/8) Epoch 12, batch 300, train_loss[loss=2.881, ArTop10Accuracy=0.7502, over 14427.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7709, over 9386.69 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 11:33:30,738 INFO [trainer.py:765] (6/8) Epoch 12, batch 400, train_loss[loss=2.72, ArTop10Accuracy=0.7858, over 10209.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7717, over 10294.20 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,733 INFO [trainer.py:765] (6/8) Epoch 12, batch 500, train_loss[loss=2.798, ArTop10Accuracy=0.7727, over 12255.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7727, over 10843.06 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,361 INFO [trainer.py:765] (6/8) Epoch 12, batch 600, train_loss[loss=2.749, ArTop10Accuracy=0.7803, over 12000.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7723, over 11370.61 frames. ], batch size: 19, lr: 9.97e-03 +2024-08-06 11:38:00,344 INFO [trainer.py:765] (6/8) Epoch 12, batch 700, train_loss[loss=2.655, ArTop10Accuracy=0.8034, over 10221.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7707, over 11515.54 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 11:39:23,611 INFO [trainer.py:765] (6/8) Epoch 12, batch 800, train_loss[loss=2.715, ArTop10Accuracy=0.7911, over 10062.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.77, over 11640.74 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,889 INFO [trainer.py:765] (6/8) Epoch 12, batch 900, train_loss[loss=2.857, ArTop10Accuracy=0.756, over 12738.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7708, over 11687.97 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:13,995 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,188 INFO [trainer.py:765] (6/8) Epoch 12, batch 1000, train_loss[loss=2.799, ArTop10Accuracy=0.7682, over 12924.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7705, over 11894.35 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,320 INFO [trainer.py:765] (6/8) Epoch 12, batch 1100, train_loss[loss=2.824, ArTop10Accuracy=0.7649, over 13695.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7697, over 11970.40 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,156 INFO [trainer.py:765] (6/8) Epoch 12, batch 1200, train_loss[loss=2.943, ArTop10Accuracy=0.7438, over 11631.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7694, over 11876.83 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,431 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:47:26,600 INFO [trainer.py:765] (6/8) Epoch 13, batch 100, train_loss[loss=2.817, ArTop10Accuracy=0.7696, over 14379.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.77, over 4763.63 frames. ], batch size: 62, lr: 9.37e-03 +2024-08-06 11:48:54,778 INFO [trainer.py:765] (6/8) Epoch 13, batch 200, train_loss[loss=2.781, ArTop10Accuracy=0.78, over 13704.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7723, over 7763.99 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,515 INFO [trainer.py:765] (6/8) Epoch 13, batch 300, train_loss[loss=2.859, ArTop10Accuracy=0.7575, over 14160.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7741, over 9394.50 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,764 INFO [trainer.py:765] (6/8) Epoch 13, batch 400, train_loss[loss=2.823, ArTop10Accuracy=0.7634, over 10341.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7748, over 10317.47 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,406 INFO [trainer.py:765] (6/8) Epoch 13, batch 500, train_loss[loss=2.745, ArTop10Accuracy=0.7847, over 12111.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7749, over 10867.95 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,222 INFO [trainer.py:765] (6/8) Epoch 13, batch 600, train_loss[loss=2.773, ArTop10Accuracy=0.7749, over 11472.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7737, over 11389.84 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,079 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:55:56,834 INFO [trainer.py:811] (6/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32996MB +2024-08-06 11:55:57,712 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,464 INFO [trainer.py:765] (6/8) Epoch 13, batch 700, train_loss[loss=2.755, ArTop10Accuracy=0.7743, over 9240.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7732, over 11529.27 frames. ], batch size: 11, lr: 9.20e-03 +2024-08-06 11:57:46,683 INFO [trainer.py:765] (6/8) Epoch 13, batch 800, train_loss[loss=2.678, ArTop10Accuracy=0.7995, over 10161.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7728, over 11666.26 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 11:59:03,286 INFO [trainer.py:765] (6/8) Epoch 13, batch 900, train_loss[loss=2.735, ArTop10Accuracy=0.7828, over 12972.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7743, over 11696.54 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,174 INFO [trainer.py:765] (6/8) Epoch 13, batch 1000, train_loss[loss=2.802, ArTop10Accuracy=0.7653, over 13188.00 frames. ], tot_loss[loss=2.784, ArTop10Accuracy=0.7734, over 11875.95 frames. ], batch size: 28, lr: 9.13e-03 +2024-08-06 12:01:34,881 INFO [trainer.py:765] (6/8) Epoch 13, batch 1100, train_loss[loss=2.776, ArTop10Accuracy=0.7804, over 13716.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7721, over 11956.17 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,662 INFO [trainer.py:765] (6/8) Epoch 13, batch 1200, train_loss[loss=2.953, ArTop10Accuracy=0.7397, over 13203.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7716, over 11874.72 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,490 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:05:45,334 INFO [trainer.py:765] (6/8) Epoch 14, batch 100, train_loss[loss=2.79, ArTop10Accuracy=0.7743, over 14433.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7745, over 4759.85 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,604 INFO [trainer.py:765] (6/8) Epoch 14, batch 200, train_loss[loss=2.769, ArTop10Accuracy=0.779, over 13692.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7753, over 7748.44 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,311 INFO [trainer.py:765] (6/8) Epoch 14, batch 300, train_loss[loss=2.83, ArTop10Accuracy=0.7658, over 14106.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7766, over 9362.74 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 12:10:01,130 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,227 INFO [trainer.py:765] (6/8) Epoch 14, batch 400, train_loss[loss=2.692, ArTop10Accuracy=0.7942, over 10272.00 frames. ], tot_loss[loss=2.76, ArTop10Accuracy=0.7778, over 10277.08 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 12:11:36,151 INFO [trainer.py:765] (6/8) Epoch 14, batch 500, train_loss[loss=2.784, ArTop10Accuracy=0.774, over 12231.00 frames. ], tot_loss[loss=2.76, ArTop10Accuracy=0.7779, over 10848.71 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,994 INFO [trainer.py:765] (6/8) Epoch 14, batch 600, train_loss[loss=2.734, ArTop10Accuracy=0.782, over 11403.00 frames. ], tot_loss[loss=2.761, ArTop10Accuracy=0.7779, over 11369.47 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 12:14:38,553 INFO [trainer.py:765] (6/8) Epoch 14, batch 700, train_loss[loss=2.72, ArTop10Accuracy=0.7841, over 9363.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.777, over 11518.27 frames. ], batch size: 11, lr: 8.57e-03 +2024-08-06 12:15:58,070 INFO [trainer.py:765] (6/8) Epoch 14, batch 800, train_loss[loss=2.736, ArTop10Accuracy=0.7816, over 10083.00 frames. ], tot_loss[loss=2.769, ArTop10Accuracy=0.7761, over 11641.03 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 12:17:12,866 INFO [trainer.py:765] (6/8) Epoch 14, batch 900, train_loss[loss=2.893, ArTop10Accuracy=0.7504, over 12873.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7767, over 11702.17 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,614 INFO [trainer.py:765] (6/8) Epoch 14, batch 1000, train_loss[loss=2.769, ArTop10Accuracy=0.775, over 12948.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7759, over 11886.54 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,377 INFO [trainer.py:765] (6/8) Epoch 14, batch 1100, train_loss[loss=2.792, ArTop10Accuracy=0.7721, over 13494.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7743, over 11923.88 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 12:20:59,279 INFO [trainer.py:765] (6/8) Epoch 14, batch 1200, train_loss[loss=2.957, ArTop10Accuracy=0.7376, over 13002.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7743, over 11856.84 frames. ], batch size: 104, lr: 8.46e-03 +2024-08-06 12:21:58,343 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:23:51,961 INFO [trainer.py:765] (6/8) Epoch 15, batch 100, train_loss[loss=2.806, ArTop10Accuracy=0.7697, over 14721.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.7764, over 4782.17 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,599 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (6/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32996MB +2024-08-06 12:24:11,094 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,987 INFO [trainer.py:765] (6/8) Epoch 15, batch 200, train_loss[loss=2.735, ArTop10Accuracy=0.784, over 13680.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7782, over 7762.01 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,695 INFO [trainer.py:765] (6/8) Epoch 15, batch 300, train_loss[loss=2.759, ArTop10Accuracy=0.778, over 14037.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7789, over 9372.04 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,533 INFO [trainer.py:765] (6/8) Epoch 15, batch 400, train_loss[loss=2.646, ArTop10Accuracy=0.7968, over 10359.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7791, over 10297.47 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 12:29:54,031 INFO [trainer.py:765] (6/8) Epoch 15, batch 500, train_loss[loss=2.737, ArTop10Accuracy=0.7873, over 12321.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7803, over 10856.16 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,292 INFO [trainer.py:765] (6/8) Epoch 15, batch 600, train_loss[loss=2.682, ArTop10Accuracy=0.7952, over 11307.00 frames. ], tot_loss[loss=2.749, ArTop10Accuracy=0.7797, over 11384.85 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,175 INFO [trainer.py:765] (6/8) Epoch 15, batch 700, train_loss[loss=2.718, ArTop10Accuracy=0.7914, over 10119.00 frames. ], tot_loss[loss=2.752, ArTop10Accuracy=0.7794, over 11527.71 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 12:34:18,254 INFO [trainer.py:765] (6/8) Epoch 15, batch 800, train_loss[loss=2.788, ArTop10Accuracy=0.7694, over 10101.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7787, over 11637.91 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 12:35:34,726 INFO [trainer.py:765] (6/8) Epoch 15, batch 900, train_loss[loss=2.783, ArTop10Accuracy=0.7685, over 13059.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7798, over 11686.68 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,540 INFO [trainer.py:765] (6/8) Epoch 15, batch 1000, train_loss[loss=2.65, ArTop10Accuracy=0.8008, over 12771.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.779, over 11872.69 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,178 INFO [trainer.py:765] (6/8) Epoch 15, batch 1100, train_loss[loss=2.793, ArTop10Accuracy=0.7725, over 13560.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.777, over 11943.50 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,841 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,788 INFO [trainer.py:765] (6/8) Epoch 15, batch 1200, train_loss[loss=2.871, ArTop10Accuracy=0.7557, over 12357.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7769, over 11881.76 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,514 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:42:17,618 INFO [trainer.py:765] (6/8) Epoch 16, batch 100, train_loss[loss=2.841, ArTop10Accuracy=0.7601, over 14646.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7803, over 4751.87 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,564 INFO [trainer.py:765] (6/8) Epoch 16, batch 200, train_loss[loss=2.797, ArTop10Accuracy=0.7643, over 13659.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7815, over 7742.92 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,501 INFO [trainer.py:765] (6/8) Epoch 16, batch 300, train_loss[loss=2.836, ArTop10Accuracy=0.7623, over 14430.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7816, over 9375.84 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,208 INFO [trainer.py:765] (6/8) Epoch 16, batch 400, train_loss[loss=2.658, ArTop10Accuracy=0.8016, over 10086.00 frames. ], tot_loss[loss=2.739, ArTop10Accuracy=0.7816, over 10295.80 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,310 INFO [trainer.py:765] (6/8) Epoch 16, batch 500, train_loss[loss=2.604, ArTop10Accuracy=0.8112, over 12246.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7829, over 10853.76 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,641 INFO [trainer.py:765] (6/8) Epoch 16, batch 600, train_loss[loss=2.725, ArTop10Accuracy=0.7851, over 11598.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.782, over 11364.55 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,681 INFO [trainer.py:765] (6/8) Epoch 16, batch 700, train_loss[loss=2.59, ArTop10Accuracy=0.8143, over 10077.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.781, over 11512.75 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 12:52:43,500 INFO [trainer.py:765] (6/8) Epoch 16, batch 800, train_loss[loss=2.67, ArTop10Accuracy=0.7927, over 9582.00 frames. ], tot_loss[loss=2.746, ArTop10Accuracy=0.7802, over 11628.57 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,015 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:53:15,497 INFO [trainer.py:811] (6/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,497 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32996MB +2024-08-06 12:53:16,186 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,482 INFO [trainer.py:765] (6/8) Epoch 16, batch 900, train_loss[loss=2.788, ArTop10Accuracy=0.772, over 12915.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7813, over 11680.34 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,791 INFO [trainer.py:765] (6/8) Epoch 16, batch 1000, train_loss[loss=2.737, ArTop10Accuracy=0.7785, over 12723.00 frames. ], tot_loss[loss=2.748, ArTop10Accuracy=0.7801, over 11873.29 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,162 INFO [trainer.py:765] (6/8) Epoch 16, batch 1100, train_loss[loss=2.79, ArTop10Accuracy=0.7745, over 13440.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7782, over 11956.98 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,485 INFO [trainer.py:765] (6/8) Epoch 16, batch 1200, train_loss[loss=2.865, ArTop10Accuracy=0.7548, over 12549.00 frames. ], tot_loss[loss=2.755, ArTop10Accuracy=0.7788, over 11849.71 frames. ], batch size: 101, lr: 7.44e-03 +2024-08-06 12:58:48,420 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:00:47,899 INFO [trainer.py:765] (6/8) Epoch 17, batch 100, train_loss[loss=2.875, ArTop10Accuracy=0.76, over 14706.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7804, over 4765.55 frames. 
], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,302 INFO [trainer.py:765] (6/8) Epoch 17, batch 200, train_loss[loss=2.777, ArTop10Accuracy=0.7749, over 13542.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7822, over 7755.64 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,515 INFO [trainer.py:765] (6/8) Epoch 17, batch 300, train_loss[loss=2.828, ArTop10Accuracy=0.7632, over 14058.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7828, over 9393.18 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 13:05:21,759 INFO [trainer.py:765] (6/8) Epoch 17, batch 400, train_loss[loss=2.758, ArTop10Accuracy=0.7779, over 10314.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.7832, over 10294.26 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,020 INFO [trainer.py:765] (6/8) Epoch 17, batch 500, train_loss[loss=2.681, ArTop10Accuracy=0.7888, over 12366.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7843, over 10838.45 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,878 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,687 INFO [trainer.py:765] (6/8) Epoch 17, batch 600, train_loss[loss=2.668, ArTop10Accuracy=0.794, over 11466.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7836, over 11379.70 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,834 INFO [trainer.py:765] (6/8) Epoch 17, batch 700, train_loss[loss=2.621, ArTop10Accuracy=0.8043, over 10122.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7823, over 11526.17 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,479 INFO [trainer.py:765] (6/8) Epoch 17, batch 800, train_loss[loss=2.704, ArTop10Accuracy=0.79, over 9333.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7818, over 11640.17 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 13:12:35,668 INFO [trainer.py:765] (6/8) Epoch 17, batch 900, train_loss[loss=2.734, ArTop10Accuracy=0.7787, over 12672.00 frames. ], tot_loss[loss=2.735, ArTop10Accuracy=0.7826, over 11668.05 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,060 INFO [trainer.py:765] (6/8) Epoch 17, batch 1000, train_loss[loss=2.713, ArTop10Accuracy=0.7853, over 12852.00 frames. ], tot_loss[loss=2.74, ArTop10Accuracy=0.7818, over 11869.79 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,483 INFO [trainer.py:765] (6/8) Epoch 17, batch 1100, train_loss[loss=2.706, ArTop10Accuracy=0.7856, over 13716.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.781, over 11962.45 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 13:16:22,387 INFO [trainer.py:765] (6/8) Epoch 17, batch 1200, train_loss[loss=2.909, ArTop10Accuracy=0.7491, over 12246.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.781, over 11873.02 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:22,043 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:19:15,993 INFO [trainer.py:765] (6/8) Epoch 18, batch 100, train_loss[loss=2.776, ArTop10Accuracy=0.7781, over 14277.00 frames. ], tot_loss[loss=2.73, ArTop10Accuracy=0.783, over 4750.54 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,600 INFO [trainer.py:765] (6/8) Epoch 18, batch 200, train_loss[loss=2.672, ArTop10Accuracy=0.7921, over 13611.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7837, over 7748.30 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,104 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32996MB +2024-08-06 13:22:05,473 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,581 INFO [trainer.py:765] (6/8) Epoch 18, batch 300, train_loss[loss=2.838, ArTop10Accuracy=0.7638, over 13896.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7851, over 9377.58 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,930 INFO [trainer.py:765] (6/8) Epoch 18, batch 400, train_loss[loss=2.641, ArTop10Accuracy=0.8014, over 10359.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7864, over 10291.94 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 13:25:34,013 INFO [trainer.py:765] (6/8) Epoch 18, batch 500, train_loss[loss=2.668, ArTop10Accuracy=0.7913, over 12363.00 frames. ], tot_loss[loss=2.707, ArTop10Accuracy=0.7878, over 10844.48 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,634 INFO [trainer.py:765] (6/8) Epoch 18, batch 600, train_loss[loss=2.764, ArTop10Accuracy=0.7797, over 11328.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7862, over 11349.84 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,582 INFO [trainer.py:765] (6/8) Epoch 18, batch 700, train_loss[loss=2.591, ArTop10Accuracy=0.8194, over 10095.00 frames. ], tot_loss[loss=2.722, ArTop10Accuracy=0.7846, over 11508.81 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,986 INFO [trainer.py:765] (6/8) Epoch 18, batch 800, train_loss[loss=2.675, ArTop10Accuracy=0.7975, over 10242.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.7839, over 11641.75 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,519 INFO [trainer.py:765] (6/8) Epoch 18, batch 900, train_loss[loss=2.736, ArTop10Accuracy=0.785, over 12867.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7847, over 11684.78 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,552 INFO [trainer.py:765] (6/8) Epoch 18, batch 1000, train_loss[loss=2.675, ArTop10Accuracy=0.7963, over 13053.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7837, over 11887.14 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,498 INFO [trainer.py:765] (6/8) Epoch 18, batch 1100, train_loss[loss=2.771, ArTop10Accuracy=0.7765, over 13674.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7822, over 11950.20 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,674 INFO [trainer.py:765] (6/8) Epoch 18, batch 1200, train_loss[loss=2.864, ArTop10Accuracy=0.756, over 12729.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7823, over 11875.52 frames. ], batch size: 103, lr: 6.63e-03 +2024-08-06 13:35:51,064 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,178 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:37:48,625 INFO [trainer.py:765] (6/8) Epoch 19, batch 100, train_loss[loss=2.803, ArTop10Accuracy=0.7693, over 14697.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.784, over 4767.13 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,257 INFO [trainer.py:765] (6/8) Epoch 19, batch 200, train_loss[loss=2.731, ArTop10Accuracy=0.7833, over 13434.00 frames. ], tot_loss[loss=2.714, ArTop10Accuracy=0.7859, over 7768.01 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,360 INFO [trainer.py:765] (6/8) Epoch 19, batch 300, train_loss[loss=2.752, ArTop10Accuracy=0.7792, over 14064.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7868, over 9391.40 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 13:42:21,067 INFO [trainer.py:765] (6/8) Epoch 19, batch 400, train_loss[loss=2.602, ArTop10Accuracy=0.8087, over 10113.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.7881, over 10296.90 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,955 INFO [trainer.py:765] (6/8) Epoch 19, batch 500, train_loss[loss=2.709, ArTop10Accuracy=0.7881, over 12324.00 frames. ], tot_loss[loss=2.702, ArTop10Accuracy=0.7884, over 10837.31 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 13:45:16,682 INFO [trainer.py:765] (6/8) Epoch 19, batch 600, train_loss[loss=2.716, ArTop10Accuracy=0.7878, over 11298.00 frames. ], tot_loss[loss=2.709, ArTop10Accuracy=0.7873, over 11358.08 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,324 INFO [trainer.py:765] (6/8) Epoch 19, batch 700, train_loss[loss=2.64, ArTop10Accuracy=0.7995, over 9423.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7867, over 11509.57 frames. ], batch size: 11, lr: 6.35e-03 +2024-08-06 13:48:11,884 INFO [trainer.py:765] (6/8) Epoch 19, batch 800, train_loss[loss=2.655, ArTop10Accuracy=0.7969, over 9987.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7858, over 11618.78 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,259 INFO [trainer.py:765] (6/8) Epoch 19, batch 900, train_loss[loss=2.658, ArTop10Accuracy=0.7996, over 12774.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7859, over 11675.63 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,655 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:50:50,537 INFO [trainer.py:811] (6/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,537 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33001MB +2024-08-06 13:50:51,489 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,915 INFO [trainer.py:765] (6/8) Epoch 19, batch 1000, train_loss[loss=2.717, ArTop10Accuracy=0.7874, over 13323.00 frames. ], tot_loss[loss=2.718, ArTop10Accuracy=0.7857, over 11862.01 frames. ], batch size: 28, lr: 6.31e-03 +2024-08-06 13:52:08,265 INFO [trainer.py:765] (6/8) Epoch 19, batch 1100, train_loss[loss=2.755, ArTop10Accuracy=0.7818, over 13548.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7847, over 11965.46 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,311 INFO [trainer.py:765] (6/8) Epoch 19, batch 1200, train_loss[loss=2.826, ArTop10Accuracy=0.7675, over 12309.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7843, over 11867.08 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 13:54:21,695 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:56:12,904 INFO [trainer.py:765] (6/8) Epoch 20, batch 100, train_loss[loss=2.797, ArTop10Accuracy=0.7733, over 14463.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7859, over 4772.15 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,494 INFO [trainer.py:765] (6/8) Epoch 20, batch 200, train_loss[loss=2.71, ArTop10Accuracy=0.793, over 13584.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7872, over 7731.54 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,430 INFO [trainer.py:765] (6/8) Epoch 20, batch 300, train_loss[loss=2.815, ArTop10Accuracy=0.7639, over 14337.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7882, over 9366.66 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,356 INFO [trainer.py:765] (6/8) Epoch 20, batch 400, train_loss[loss=2.476, ArTop10Accuracy=0.8339, over 10269.00 frames. ], tot_loss[loss=2.698, ArTop10Accuracy=0.7895, over 10281.80 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 14:02:14,854 INFO [trainer.py:765] (6/8) Epoch 20, batch 500, train_loss[loss=2.669, ArTop10Accuracy=0.7987, over 12246.00 frames. ], tot_loss[loss=2.695, ArTop10Accuracy=0.7901, over 10844.32 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,855 INFO [trainer.py:765] (6/8) Epoch 20, batch 600, train_loss[loss=2.728, ArTop10Accuracy=0.7828, over 11280.00 frames. ], tot_loss[loss=2.698, ArTop10Accuracy=0.7897, over 11360.98 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,864 INFO [trainer.py:765] (6/8) Epoch 20, batch 700, train_loss[loss=2.634, ArTop10Accuracy=0.8074, over 9540.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7889, over 11505.93 frames. ], batch size: 11, lr: 6.03e-03 +2024-08-06 14:05:30,791 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,509 INFO [trainer.py:765] (6/8) Epoch 20, batch 800, train_loss[loss=2.617, ArTop10Accuracy=0.8054, over 9417.00 frames. ], tot_loss[loss=2.708, ArTop10Accuracy=0.7877, over 11629.12 frames. ], batch size: 11, lr: 6.02e-03 +2024-08-06 14:07:50,944 INFO [trainer.py:765] (6/8) Epoch 20, batch 900, train_loss[loss=2.679, ArTop10Accuracy=0.7932, over 12960.00 frames. ], tot_loss[loss=2.702, ArTop10Accuracy=0.7887, over 11692.16 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,172 INFO [trainer.py:765] (6/8) Epoch 20, batch 1000, train_loss[loss=2.667, ArTop10Accuracy=0.7966, over 12981.00 frames. ], tot_loss[loss=2.707, ArTop10Accuracy=0.7878, over 11875.75 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,209 INFO [trainer.py:765] (6/8) Epoch 20, batch 1100, train_loss[loss=2.763, ArTop10Accuracy=0.7709, over 13563.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.786, over 11962.47 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,812 INFO [trainer.py:765] (6/8) Epoch 20, batch 1200, train_loss[loss=2.868, ArTop10Accuracy=0.7602, over 12078.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7859, over 11889.77 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:37,393 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 14:12:37,395 INFO [trainer.py:1069] (6/8) Done! 
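The per-rank logs above share one fixed progress-line format emitted at trainer.py:765: "Epoch E, batch B, train_loss[loss=..., ArTop10Accuracy=..., over N frames. ], tot_loss[loss=..., ArTop10Accuracy=..., over N frames. ], batch size: S, lr: L". A minimal parsing sketch follows, assuming one log entry per line as in the committed files; the regex, the helper name parse_log, and the example path are illustrative and not part of this repository.

import re
from pathlib import Path

# Matches trainer.py:765 progress lines such as:
#   ... INFO [trainer.py:765] (6/8) Epoch 10, batch 300, train_loss[loss=2.861,
#   ArTop10Accuracy=0.761, over 14100.00 frames. ], tot_loss[loss=2.827,
#   ArTop10Accuracy=0.765, over 9360.91 frames. ], batch size: 44, lr: 1.19e-02
LINE_RE = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), ArTop10Accuracy=(?P<train_acc>[\d.]+).*?\], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), ArTop10Accuracy=(?P<tot_acc>[\d.]+).*?\], "
    r"batch size: (?P<batch_size>\d+), lr: (?P<lr>[\d.e+-]+)"
)

def parse_log(path):
    """Yield one record per progress line of a single-rank log file."""
    for line in Path(path).read_text().splitlines():
        m = LINE_RE.search(line)
        if m is None:
            continue  # skips validation, grad-norm, and datamodule lines
        d = m.groupdict()
        yield {
            "epoch": int(d["epoch"]),
            "batch": int(d["batch"]),
            "train_loss": float(d["train_loss"]),
            "train_ArTop10Accuracy": float(d["train_acc"]),
            "tot_loss": float(d["tot_loss"]),
            "tot_ArTop10Accuracy": float(d["tot_acc"]),
            "batch_size": int(d["batch_size"]),
            "lr": float(d["lr"]),
        }

# Example (hypothetical invocation):
# records = list(parse_log("libritts-r/log/log-train-2024-08-06-08-02-16-0"))

Applied to a single rank's file, this yields the per-100-batch tot_loss and ArTop10Accuracy series, which can then be compared against the periodic validation lines.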
diff --git a/libritts-r/log/log-train-2024-08-06-08-06-14-7 b/libritts-r/log/log-train-2024-08-06-08-06-14-7 new file mode 100644 index 0000000000000000000000000000000000000000..aa41d90cf315e97f7f629e785bbf0f7a25db49df --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-08-06-14-7 @@ -0,0 +1,336 @@ +2024-08-06 08:06:14,313 INFO [trainer.py:870] (7/8) Training started +2024-08-06 08:06:14,314 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 08:06:14,314 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 20000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 08:06:14,314 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 08:06:15,084 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 08:06:16,739 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 08:06:19,151 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 08:06:19,153 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 08:06:19,154 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 08:06:19,154 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 08:06:19,155 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 08:06:19,768 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 08:06:19,768 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 08:06:20,094 INFO [datamodule.py:388] (7/8) About to create dev dataloader +2024-08-06 
08:08:02,126 INFO [trainer.py:765] (7/8) Epoch 1, batch 100, train_loss[loss=4.362, ArTop10Accuracy=0.4896, over 14457.00 frames. ], tot_loss[loss=5.049, ArTop10Accuracy=0.3745, over 4762.40 frames. ], batch size: 62, lr: 2.25e-02 +2024-08-06 08:09:28,833 INFO [trainer.py:765] (7/8) Epoch 1, batch 200, train_loss[loss=4.118, ArTop10Accuracy=0.5253, over 13509.00 frames. ], tot_loss[loss=4.493, ArTop10Accuracy=0.467, over 7761.95 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 08:10:52,434 INFO [trainer.py:765] (7/8) Epoch 1, batch 300, train_loss[loss=3.871, ArTop10Accuracy=0.5676, over 14031.00 frames. ], tot_loss[loss=4.216, ArTop10Accuracy=0.5135, over 9370.01 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 08:12:12,702 INFO [trainer.py:765] (7/8) Epoch 1, batch 400, train_loss[loss=3.659, ArTop10Accuracy=0.6127, over 10974.00 frames. ], tot_loss[loss=4.026, ArTop10Accuracy=0.5457, over 10298.14 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 08:13:40,053 INFO [trainer.py:765] (7/8) Epoch 1, batch 500, train_loss[loss=3.59, ArTop10Accuracy=0.627, over 12051.00 frames. ], tot_loss[loss=3.88, ArTop10Accuracy=0.5713, over 10855.87 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 08:15:00,246 INFO [trainer.py:765] (7/8) Epoch 1, batch 600, train_loss[loss=3.581, ArTop10Accuracy=0.6283, over 11385.00 frames. ], tot_loss[loss=3.765, ArTop10Accuracy=0.5915, over 11370.53 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 08:16:26,428 INFO [trainer.py:765] (7/8) Epoch 1, batch 700, train_loss[loss=3.46, ArTop10Accuracy=0.6474, over 9840.00 frames. ], tot_loss[loss=3.686, ArTop10Accuracy=0.6055, over 11504.33 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 08:17:43,021 INFO [trainer.py:765] (7/8) Epoch 1, batch 800, train_loss[loss=3.483, ArTop10Accuracy=0.6427, over 9441.00 frames. ], tot_loss[loss=3.625, ArTop10Accuracy=0.6167, over 11619.59 frames. ], batch size: 11, lr: 2.98e-02 +2024-08-06 08:18:56,154 INFO [trainer.py:765] (7/8) Epoch 1, batch 900, train_loss[loss=3.48, ArTop10Accuracy=0.6414, over 13068.00 frames. ], tot_loss[loss=3.565, ArTop10Accuracy=0.6279, over 11679.43 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 08:20:12,866 INFO [trainer.py:765] (7/8) Epoch 1, batch 1000, train_loss[loss=3.465, ArTop10Accuracy=0.6448, over 13329.00 frames. ], tot_loss[loss=3.524, ArTop10Accuracy=0.6351, over 11878.27 frames. ], batch size: 28, lr: 2.97e-02 +2024-08-06 08:20:13,547 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 9.300e+01 1.871e+02 2.675e+02 4.030e+02 9.119e+03, threshold=5.351e+02, percent-clipped=0.0 +2024-08-06 08:21:29,160 INFO [trainer.py:765] (7/8) Epoch 1, batch 1100, train_loss[loss=3.453, ArTop10Accuracy=0.6489, over 14163.00 frames. ], tot_loss[loss=3.487, ArTop10Accuracy=0.6419, over 11947.56 frames. ], batch size: 35, lr: 2.96e-02 +2024-08-06 08:22:45,419 INFO [trainer.py:765] (7/8) Epoch 1, batch 1200, train_loss[loss=3.473, ArTop10Accuracy=0.6507, over 12900.00 frames. ], tot_loss[loss=3.462, ArTop10Accuracy=0.6467, over 11842.32 frames. ], batch size: 103, lr: 2.96e-02 +2024-08-06 08:23:45,173 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:25:36,245 INFO [trainer.py:765] (7/8) Epoch 2, batch 100, train_loss[loss=3.444, ArTop10Accuracy=0.6432, over 14592.00 frames. ], tot_loss[loss=3.421, ArTop10Accuracy=0.6525, over 4756.08 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 08:26:58,964 INFO [trainer.py:765] (7/8) Epoch 2, batch 200, train_loss[loss=3.272, ArTop10Accuracy=0.6805, over 13635.00 frames. ], tot_loss[loss=3.39, ArTop10Accuracy=0.6589, over 7735.02 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 08:28:25,540 INFO [trainer.py:765] (7/8) Epoch 2, batch 300, train_loss[loss=3.393, ArTop10Accuracy=0.6549, over 14247.00 frames. ], tot_loss[loss=3.372, ArTop10Accuracy=0.6625, over 9363.55 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 08:29:48,645 INFO [trainer.py:765] (7/8) Epoch 2, batch 400, train_loss[loss=3.26, ArTop10Accuracy=0.6889, over 10353.00 frames. ], tot_loss[loss=3.355, ArTop10Accuracy=0.6658, over 10272.53 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 08:31:22,910 INFO [trainer.py:765] (7/8) Epoch 2, batch 500, train_loss[loss=3.372, ArTop10Accuracy=0.6614, over 12180.00 frames. ], tot_loss[loss=3.339, ArTop10Accuracy=0.6693, over 10849.42 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 08:32:45,693 INFO [trainer.py:765] (7/8) Epoch 2, batch 600, train_loss[loss=3.357, ArTop10Accuracy=0.6623, over 11406.00 frames. ], tot_loss[loss=3.329, ArTop10Accuracy=0.6711, over 11357.08 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 08:34:13,589 INFO [trainer.py:765] (7/8) Epoch 2, batch 700, train_loss[loss=3.281, ArTop10Accuracy=0.6847, over 9357.00 frames. ], tot_loss[loss=3.323, ArTop10Accuracy=0.6721, over 11496.48 frames. ], batch size: 11, lr: 2.85e-02 +2024-08-06 08:34:31,180 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:34:40,888 INFO [trainer.py:811] (7/8) Epoch 2, validation: loss=3.277, ArTop10Accuracy=0.6803, over 1827537.00 frames. +2024-08-06 08:34:40,889 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 28320MB +2024-08-06 08:34:41,706 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 7.953e+01 1.592e+02 2.200e+02 3.344e+02 2.949e+03, threshold=4.400e+02, percent-clipped=8.6 +2024-08-06 08:35:39,884 INFO [trainer.py:765] (7/8) Epoch 2, batch 800, train_loss[loss=3.251, ArTop10Accuracy=0.6922, over 10062.00 frames. ], tot_loss[loss=3.323, ArTop10Accuracy=0.6723, over 11621.00 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 08:36:56,378 INFO [trainer.py:765] (7/8) Epoch 2, batch 900, train_loss[loss=3.305, ArTop10Accuracy=0.6733, over 12777.00 frames. ], tot_loss[loss=3.309, ArTop10Accuracy=0.6752, over 11675.12 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 08:38:10,518 INFO [trainer.py:765] (7/8) Epoch 2, batch 1000, train_loss[loss=3.269, ArTop10Accuracy=0.6866, over 12738.00 frames. ], tot_loss[loss=3.299, ArTop10Accuracy=0.677, over 11877.04 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 08:39:25,065 INFO [trainer.py:765] (7/8) Epoch 2, batch 1100, train_loss[loss=3.277, ArTop10Accuracy=0.6814, over 13695.00 frames. ], tot_loss[loss=3.289, ArTop10Accuracy=0.679, over 11951.32 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 08:40:38,225 INFO [trainer.py:765] (7/8) Epoch 2, batch 1200, train_loss[loss=3.377, ArTop10Accuracy=0.6608, over 12480.00 frames. ], tot_loss[loss=3.278, ArTop10Accuracy=0.681, over 11872.33 frames. ], batch size: 103, lr: 2.80e-02 +2024-08-06 08:41:38,406 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:43:36,655 INFO [trainer.py:765] (7/8) Epoch 3, batch 100, train_loss[loss=3.251, ArTop10Accuracy=0.6868, over 14670.00 frames. ], tot_loss[loss=3.247, ArTop10Accuracy=0.6854, over 4767.29 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 08:45:10,507 INFO [trainer.py:765] (7/8) Epoch 3, batch 200, train_loss[loss=3.166, ArTop10Accuracy=0.7034, over 13593.00 frames. ], tot_loss[loss=3.22, ArTop10Accuracy=0.6913, over 7727.64 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 08:46:29,264 INFO [trainer.py:765] (7/8) Epoch 3, batch 300, train_loss[loss=3.151, ArTop10Accuracy=0.7055, over 14394.00 frames. ], tot_loss[loss=3.198, ArTop10Accuracy=0.6953, over 9351.33 frames. ], batch size: 45, lr: 2.64e-02 +2024-08-06 08:48:04,224 INFO [trainer.py:765] (7/8) Epoch 3, batch 400, train_loss[loss=3.084, ArTop10Accuracy=0.7187, over 10278.00 frames. ], tot_loss[loss=3.18, ArTop10Accuracy=0.6989, over 10262.26 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 08:48:40,887 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 9.282e+01 1.561e+02 1.981e+02 2.686e+02 1.768e+03, threshold=3.962e+02, percent-clipped=7.6 +2024-08-06 08:49:25,547 INFO [trainer.py:765] (7/8) Epoch 3, batch 500, train_loss[loss=3.081, ArTop10Accuracy=0.7195, over 12162.00 frames. ], tot_loss[loss=3.166, ArTop10Accuracy=0.7019, over 10825.79 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 08:51:00,482 INFO [trainer.py:765] (7/8) Epoch 3, batch 600, train_loss[loss=3.024, ArTop10Accuracy=0.7292, over 11460.00 frames. ], tot_loss[loss=3.154, ArTop10Accuracy=0.7042, over 11342.13 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 08:52:31,623 INFO [trainer.py:765] (7/8) Epoch 3, batch 700, train_loss[loss=3.017, ArTop10Accuracy=0.7306, over 9441.00 frames. ], tot_loss[loss=3.146, ArTop10Accuracy=0.7056, over 11498.02 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 08:53:57,394 INFO [trainer.py:765] (7/8) Epoch 3, batch 800, train_loss[loss=3.106, ArTop10Accuracy=0.7194, over 9345.00 frames. ], tot_loss[loss=3.141, ArTop10Accuracy=0.7065, over 11638.04 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 08:55:15,124 INFO [trainer.py:765] (7/8) Epoch 3, batch 900, train_loss[loss=3.124, ArTop10Accuracy=0.7148, over 12777.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.7106, over 11686.70 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 08:56:31,563 INFO [trainer.py:765] (7/8) Epoch 3, batch 1000, train_loss[loss=3.048, ArTop10Accuracy=0.7245, over 13221.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.7118, over 11891.42 frames. ], batch size: 28, lr: 2.56e-02 +2024-08-06 08:57:46,512 INFO [trainer.py:765] (7/8) Epoch 3, batch 1100, train_loss[loss=3.064, ArTop10Accuracy=0.7176, over 13782.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.713, over 11944.77 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 08:59:01,406 INFO [trainer.py:765] (7/8) Epoch 3, batch 1200, train_loss[loss=3.197, ArTop10Accuracy=0.6902, over 11523.00 frames. ], tot_loss[loss=3.094, ArTop10Accuracy=0.7152, over 11857.67 frames. ], batch size: 101, lr: 2.54e-02 +2024-08-06 09:00:01,956 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:01:50,747 INFO [trainer.py:765] (7/8) Epoch 4, batch 100, train_loss[loss=3.081, ArTop10Accuracy=0.7135, over 14556.00 frames. ], tot_loss[loss=3.068, ArTop10Accuracy=0.7198, over 4752.46 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 09:02:52,864 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:03:02,384 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=2.997, ArTop10Accuracy=0.7338, over 1827537.00 frames. 
+2024-08-06 09:03:02,385 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32854MB +2024-08-06 09:03:03,370 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.499e+02 1.782e+02 2.273e+02 1.100e+03, threshold=3.565e+02, percent-clipped=4.7 +2024-08-06 09:03:29,279 INFO [trainer.py:765] (7/8) Epoch 4, batch 200, train_loss[loss=3.04, ArTop10Accuracy=0.7267, over 13431.00 frames. ], tot_loss[loss=3.05, ArTop10Accuracy=0.7235, over 7758.26 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 09:05:01,738 INFO [trainer.py:765] (7/8) Epoch 4, batch 300, train_loss[loss=3.08, ArTop10Accuracy=0.7196, over 14442.00 frames. ], tot_loss[loss=3.04, ArTop10Accuracy=0.7252, over 9376.27 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 09:06:28,158 INFO [trainer.py:765] (7/8) Epoch 4, batch 400, train_loss[loss=2.945, ArTop10Accuracy=0.7439, over 11004.00 frames. ], tot_loss[loss=3.035, ArTop10Accuracy=0.7263, over 10292.54 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 09:08:01,931 INFO [trainer.py:765] (7/8) Epoch 4, batch 500, train_loss[loss=3.035, ArTop10Accuracy=0.7318, over 12075.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7281, over 10859.34 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 09:09:28,546 INFO [trainer.py:765] (7/8) Epoch 4, batch 600, train_loss[loss=3.041, ArTop10Accuracy=0.7257, over 11595.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.7289, over 11371.96 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 09:10:59,871 INFO [trainer.py:765] (7/8) Epoch 4, batch 700, train_loss[loss=2.907, ArTop10Accuracy=0.7516, over 9414.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7293, over 11502.51 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 09:12:17,518 INFO [trainer.py:765] (7/8) Epoch 4, batch 800, train_loss[loss=3.083, ArTop10Accuracy=0.7137, over 10341.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7287, over 11643.99 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 09:13:33,218 INFO [trainer.py:765] (7/8) Epoch 4, batch 900, train_loss[loss=2.993, ArTop10Accuracy=0.7329, over 13167.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7305, over 11694.20 frames. ], batch size: 28, lr: 2.29e-02 +2024-08-06 09:14:47,526 INFO [trainer.py:765] (7/8) Epoch 4, batch 1000, train_loss[loss=2.966, ArTop10Accuracy=0.7427, over 13272.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7308, over 11890.45 frames. ], batch size: 28, lr: 2.28e-02 +2024-08-06 09:16:02,988 INFO [trainer.py:765] (7/8) Epoch 4, batch 1100, train_loss[loss=3.074, ArTop10Accuracy=0.7127, over 14052.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7307, over 11951.97 frames. ], batch size: 35, lr: 2.26e-02 +2024-08-06 09:16:53,297 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.440e+02 1.636e+02 1.968e+02 7.702e+02, threshold=3.273e+02, percent-clipped=1.3 +2024-08-06 09:17:18,350 INFO [trainer.py:765] (7/8) Epoch 4, batch 1200, train_loss[loss=3.072, ArTop10Accuracy=0.7225, over 12207.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7309, over 11891.15 frames. ], batch size: 101, lr: 2.25e-02 +2024-08-06 09:18:17,193 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:20:17,179 INFO [trainer.py:765] (7/8) Epoch 5, batch 100, train_loss[loss=2.969, ArTop10Accuracy=0.738, over 14637.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7338, over 4753.25 frames. 
], batch size: 63, lr: 2.10e-02 +2024-08-06 09:21:52,302 INFO [trainer.py:765] (7/8) Epoch 5, batch 200, train_loss[loss=2.949, ArTop10Accuracy=0.742, over 13872.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.736, over 7749.37 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 09:23:19,247 INFO [trainer.py:765] (7/8) Epoch 5, batch 300, train_loss[loss=2.975, ArTop10Accuracy=0.737, over 14184.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7381, over 9371.28 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 09:24:53,543 INFO [trainer.py:765] (7/8) Epoch 5, batch 400, train_loss[loss=2.977, ArTop10Accuracy=0.7381, over 10341.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7394, over 10277.25 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 09:26:19,423 INFO [trainer.py:765] (7/8) Epoch 5, batch 500, train_loss[loss=2.844, ArTop10Accuracy=0.7612, over 12285.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7406, over 10834.88 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 09:27:49,543 INFO [trainer.py:765] (7/8) Epoch 5, batch 600, train_loss[loss=2.869, ArTop10Accuracy=0.7593, over 11508.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7401, over 11349.06 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 09:29:21,675 INFO [trainer.py:765] (7/8) Epoch 5, batch 700, train_loss[loss=2.854, ArTop10Accuracy=0.7634, over 10257.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7394, over 11495.18 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 09:30:44,698 INFO [trainer.py:765] (7/8) Epoch 5, batch 800, train_loss[loss=2.925, ArTop10Accuracy=0.7477, over 10026.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7388, over 11633.89 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 09:31:51,245 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:32:00,762 INFO [trainer.py:811] (7/8) Epoch 5, validation: loss=2.926, ArTop10Accuracy=0.7466, over 1827537.00 frames. +2024-08-06 09:32:00,763 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 09:32:01,716 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.060e+02 1.349e+02 1.525e+02 1.806e+02 1.007e+03, threshold=3.049e+02, percent-clipped=2.3 +2024-08-06 09:32:10,561 INFO [trainer.py:765] (7/8) Epoch 5, batch 900, train_loss[loss=2.964, ArTop10Accuracy=0.7428, over 12957.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7401, over 11679.29 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 09:33:27,329 INFO [trainer.py:765] (7/8) Epoch 5, batch 1000, train_loss[loss=2.99, ArTop10Accuracy=0.7287, over 13071.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7401, over 11873.13 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 09:34:42,306 INFO [trainer.py:765] (7/8) Epoch 5, batch 1100, train_loss[loss=2.925, ArTop10Accuracy=0.744, over 13614.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7402, over 11964.34 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 09:35:56,337 INFO [trainer.py:765] (7/8) Epoch 5, batch 1200, train_loss[loss=3.056, ArTop10Accuracy=0.7254, over 12483.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7404, over 11842.33 frames. ], batch size: 101, lr: 1.99e-02 +2024-08-06 09:36:55,307 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:38:52,672 INFO [trainer.py:765] (7/8) Epoch 6, batch 100, train_loss[loss=2.971, ArTop10Accuracy=0.7363, over 14304.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.7418, over 4755.22 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 09:40:19,840 INFO [trainer.py:765] (7/8) Epoch 6, batch 200, train_loss[loss=2.939, ArTop10Accuracy=0.7429, over 13713.00 frames. ], tot_loss[loss=2.942, ArTop10Accuracy=0.7432, over 7740.92 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 09:41:52,973 INFO [trainer.py:765] (7/8) Epoch 6, batch 300, train_loss[loss=2.971, ArTop10Accuracy=0.7397, over 14376.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7451, over 9374.66 frames. ], batch size: 45, lr: 1.83e-02 +2024-08-06 09:43:17,836 INFO [trainer.py:765] (7/8) Epoch 6, batch 400, train_loss[loss=2.886, ArTop10Accuracy=0.7542, over 10356.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7469, over 10317.75 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 09:44:54,136 INFO [trainer.py:765] (7/8) Epoch 6, batch 500, train_loss[loss=2.996, ArTop10Accuracy=0.7306, over 12210.00 frames. ], tot_loss[loss=2.917, ArTop10Accuracy=0.7488, over 10868.63 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 09:46:22,879 INFO [trainer.py:765] (7/8) Epoch 6, batch 600, train_loss[loss=2.871, ArTop10Accuracy=0.7539, over 11418.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7478, over 11372.97 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 09:46:37,226 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.012e+02 1.339e+02 1.480e+02 1.701e+02 7.506e+02, threshold=2.959e+02, percent-clipped=1.1 +2024-08-06 09:47:57,878 INFO [trainer.py:765] (7/8) Epoch 6, batch 700, train_loss[loss=2.864, ArTop10Accuracy=0.7643, over 9345.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7467, over 11521.82 frames. ], batch size: 11, lr: 1.80e-02 +2024-08-06 09:49:15,961 INFO [trainer.py:765] (7/8) Epoch 6, batch 800, train_loss[loss=2.941, ArTop10Accuracy=0.7444, over 10086.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7472, over 11648.74 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 09:50:32,141 INFO [trainer.py:765] (7/8) Epoch 6, batch 900, train_loss[loss=2.924, ArTop10Accuracy=0.7467, over 12903.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7481, over 11680.34 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 09:51:47,308 INFO [trainer.py:765] (7/8) Epoch 6, batch 1000, train_loss[loss=2.873, ArTop10Accuracy=0.7545, over 12843.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7476, over 11881.10 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 09:53:00,927 INFO [trainer.py:765] (7/8) Epoch 6, batch 1100, train_loss[loss=2.894, ArTop10Accuracy=0.7513, over 14028.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7467, over 11949.15 frames. ], batch size: 35, lr: 1.77e-02 +2024-08-06 09:54:14,343 INFO [trainer.py:765] (7/8) Epoch 6, batch 1200, train_loss[loss=3.005, ArTop10Accuracy=0.7324, over 12450.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7471, over 11859.26 frames. ], batch size: 101, lr: 1.76e-02 +2024-08-06 09:55:13,167 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:57:06,705 INFO [trainer.py:765] (7/8) Epoch 7, batch 100, train_loss[loss=2.988, ArTop10Accuracy=0.7343, over 14619.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7489, over 4754.07 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 09:58:39,433 INFO [trainer.py:765] (7/8) Epoch 7, batch 200, train_loss[loss=2.872, ArTop10Accuracy=0.7554, over 13872.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7506, over 7746.71 frames. 
], batch size: 35, lr: 1.64e-02 +2024-08-06 10:00:06,089 INFO [trainer.py:765] (7/8) Epoch 7, batch 300, train_loss[loss=2.922, ArTop10Accuracy=0.7488, over 14031.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7517, over 9369.72 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 10:00:40,515 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:00:50,245 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=2.88, ArTop10Accuracy=0.7554, over 1827537.00 frames. +2024-08-06 10:00:50,246 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 10:00:50,983 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.002e+02 1.286e+02 1.429e+02 1.605e+02 1.020e+03, threshold=2.857e+02, percent-clipped=1.5 +2024-08-06 10:01:49,123 INFO [trainer.py:765] (7/8) Epoch 7, batch 400, train_loss[loss=2.751, ArTop10Accuracy=0.7792, over 10182.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7523, over 10267.82 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 10:03:21,465 INFO [trainer.py:765] (7/8) Epoch 7, batch 500, train_loss[loss=2.903, ArTop10Accuracy=0.7533, over 12147.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7532, over 10812.25 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 10:04:51,890 INFO [trainer.py:765] (7/8) Epoch 7, batch 600, train_loss[loss=2.828, ArTop10Accuracy=0.7683, over 11442.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.753, over 11357.88 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 10:06:25,118 INFO [trainer.py:765] (7/8) Epoch 7, batch 700, train_loss[loss=2.884, ArTop10Accuracy=0.7584, over 10107.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7519, over 11531.01 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 10:07:46,957 INFO [trainer.py:765] (7/8) Epoch 7, batch 800, train_loss[loss=2.865, ArTop10Accuracy=0.7595, over 10149.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.752, over 11660.33 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 10:09:02,830 INFO [trainer.py:765] (7/8) Epoch 7, batch 900, train_loss[loss=2.844, ArTop10Accuracy=0.7642, over 13023.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7533, over 11695.51 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 10:10:19,642 INFO [trainer.py:765] (7/8) Epoch 7, batch 1000, train_loss[loss=2.875, ArTop10Accuracy=0.7589, over 13020.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.753, over 11897.82 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 10:11:35,214 INFO [trainer.py:765] (7/8) Epoch 7, batch 1100, train_loss[loss=2.877, ArTop10Accuracy=0.76, over 13656.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.751, over 11971.94 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 10:12:48,210 INFO [trainer.py:765] (7/8) Epoch 7, batch 1200, train_loss[loss=3.014, ArTop10Accuracy=0.7296, over 11841.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7513, over 11848.41 frames. ], batch size: 101, lr: 1.57e-02 +2024-08-06 10:13:46,715 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:15:03,607 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.017e+02 1.283e+02 1.410e+02 1.601e+02 1.017e+03, threshold=2.820e+02, percent-clipped=0.9 +2024-08-06 10:15:40,827 INFO [trainer.py:765] (7/8) Epoch 8, batch 100, train_loss[loss=2.945, ArTop10Accuracy=0.7462, over 14433.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7518, over 4743.12 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 10:17:12,869 INFO [trainer.py:765] (7/8) Epoch 8, batch 200, train_loss[loss=2.804, ArTop10Accuracy=0.773, over 13626.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7555, over 7739.33 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 10:18:37,904 INFO [trainer.py:765] (7/8) Epoch 8, batch 300, train_loss[loss=2.89, ArTop10Accuracy=0.7555, over 14010.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7572, over 9379.75 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 10:20:06,347 INFO [trainer.py:765] (7/8) Epoch 8, batch 400, train_loss[loss=2.805, ArTop10Accuracy=0.7662, over 10131.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7579, over 10274.71 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 10:21:32,417 INFO [trainer.py:765] (7/8) Epoch 8, batch 500, train_loss[loss=2.796, ArTop10Accuracy=0.7714, over 12276.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7587, over 10839.39 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 10:23:00,980 INFO [trainer.py:765] (7/8) Epoch 8, batch 600, train_loss[loss=2.787, ArTop10Accuracy=0.7728, over 11571.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7592, over 11359.98 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 10:24:37,793 INFO [trainer.py:765] (7/8) Epoch 8, batch 700, train_loss[loss=2.754, ArTop10Accuracy=0.7844, over 10128.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.7583, over 11502.93 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 10:25:56,094 INFO [trainer.py:765] (7/8) Epoch 8, batch 800, train_loss[loss=2.865, ArTop10Accuracy=0.7526, over 9339.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7573, over 11619.76 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 10:27:12,252 INFO [trainer.py:765] (7/8) Epoch 8, batch 900, train_loss[loss=2.84, ArTop10Accuracy=0.7642, over 13110.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.758, over 11686.94 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:28:25,269 INFO [trainer.py:765] (7/8) Epoch 8, batch 1000, train_loss[loss=2.851, ArTop10Accuracy=0.7588, over 12954.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7574, over 11893.33 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 10:29:07,161 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:29:16,830 INFO [trainer.py:811] (7/8) Epoch 8, validation: loss=2.858, ArTop10Accuracy=0.7594, over 1827537.00 frames. +2024-08-06 10:29:16,831 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 10:29:17,497 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.032e+02 1.275e+02 1.390e+02 1.547e+02 3.717e+02, threshold=2.781e+02, percent-clipped=0.7 +2024-08-06 10:29:51,738 INFO [trainer.py:765] (7/8) Epoch 8, batch 1100, train_loss[loss=2.876, ArTop10Accuracy=0.753, over 13728.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.756, over 11966.78 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 10:31:05,955 INFO [trainer.py:765] (7/8) Epoch 8, batch 1200, train_loss[loss=2.958, ArTop10Accuracy=0.7403, over 12834.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.756, over 11882.51 frames. ], batch size: 103, lr: 1.40e-02 +2024-08-06 10:32:05,689 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:34:01,263 INFO [trainer.py:765] (7/8) Epoch 9, batch 100, train_loss[loss=2.89, ArTop10Accuracy=0.7558, over 14211.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7584, over 4769.26 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 10:35:31,779 INFO [trainer.py:765] (7/8) Epoch 9, batch 200, train_loss[loss=2.84, ArTop10Accuracy=0.7597, over 13761.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7601, over 7753.37 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 10:36:57,933 INFO [trainer.py:765] (7/8) Epoch 9, batch 300, train_loss[loss=2.919, ArTop10Accuracy=0.7475, over 14613.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7615, over 9383.26 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 10:38:32,706 INFO [trainer.py:765] (7/8) Epoch 9, batch 400, train_loss[loss=2.867, ArTop10Accuracy=0.7587, over 10458.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7625, over 10292.17 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 10:39:59,263 INFO [trainer.py:765] (7/8) Epoch 9, batch 500, train_loss[loss=2.778, ArTop10Accuracy=0.7717, over 12252.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7633, over 10865.89 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 10:41:29,697 INFO [trainer.py:765] (7/8) Epoch 9, batch 600, train_loss[loss=2.849, ArTop10Accuracy=0.7639, over 11511.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7624, over 11375.79 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 10:42:58,448 INFO [trainer.py:765] (7/8) Epoch 9, batch 700, train_loss[loss=2.758, ArTop10Accuracy=0.7781, over 10119.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7617, over 11528.05 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:44:02,958 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.039e+02 1.253e+02 1.352e+02 1.493e+02 7.010e+02, threshold=2.704e+02, percent-clipped=0.6 +2024-08-06 10:44:19,676 INFO [trainer.py:765] (7/8) Epoch 9, batch 800, train_loss[loss=2.705, ArTop10Accuracy=0.7909, over 10119.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7611, over 11632.30 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 10:45:35,729 INFO [trainer.py:765] (7/8) Epoch 9, batch 900, train_loss[loss=2.831, ArTop10Accuracy=0.7652, over 12933.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7623, over 11672.27 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:46:51,278 INFO [trainer.py:765] (7/8) Epoch 9, batch 1000, train_loss[loss=2.873, ArTop10Accuracy=0.753, over 13110.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7612, over 11899.80 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 10:48:06,254 INFO [trainer.py:765] (7/8) Epoch 9, batch 1100, train_loss[loss=2.87, ArTop10Accuracy=0.7525, over 13704.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7602, over 11962.63 frames. ], batch size: 34, lr: 1.28e-02 +2024-08-06 10:49:21,061 INFO [trainer.py:765] (7/8) Epoch 9, batch 1200, train_loss[loss=2.96, ArTop10Accuracy=0.738, over 12012.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7602, over 11866.13 frames. ], batch size: 101, lr: 1.27e-02 +2024-08-06 10:50:21,919 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:52:12,333 INFO [trainer.py:765] (7/8) Epoch 10, batch 100, train_loss[loss=2.963, ArTop10Accuracy=0.7393, over 14571.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7611, over 4761.85 frames. ], batch size: 63, lr: 1.20e-02 +2024-08-06 10:53:44,592 INFO [trainer.py:765] (7/8) Epoch 10, batch 200, train_loss[loss=2.862, ArTop10Accuracy=0.7616, over 13656.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7631, over 7751.64 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 10:55:08,097 INFO [trainer.py:765] (7/8) Epoch 10, batch 300, train_loss[loss=2.885, ArTop10Accuracy=0.7585, over 14229.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7644, over 9362.96 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 10:56:41,184 INFO [trainer.py:765] (7/8) Epoch 10, batch 400, train_loss[loss=2.817, ArTop10Accuracy=0.7678, over 10332.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7651, over 10282.26 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 10:58:04,944 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:58:14,560 INFO [trainer.py:811] (7/8) Epoch 10, validation: loss=2.842, ArTop10Accuracy=0.7624, over 1827537.00 frames. +2024-08-06 10:58:14,560 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 10:58:15,580 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.228e+02 1.320e+02 1.458e+02 6.096e+02, threshold=2.641e+02, percent-clipped=0.6 +2024-08-06 10:58:15,587 INFO [trainer.py:765] (7/8) Epoch 10, batch 500, train_loss[loss=2.76, ArTop10Accuracy=0.7781, over 12621.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7657, over 10826.79 frames. ], batch size: 23, lr: 1.19e-02 +2024-08-06 10:59:42,823 INFO [trainer.py:765] (7/8) Epoch 10, batch 600, train_loss[loss=2.799, ArTop10Accuracy=0.7657, over 11370.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7653, over 11339.91 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 11:01:18,113 INFO [trainer.py:765] (7/8) Epoch 10, batch 700, train_loss[loss=2.74, ArTop10Accuracy=0.7784, over 10212.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7643, over 11501.92 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 11:02:36,923 INFO [trainer.py:765] (7/8) Epoch 10, batch 800, train_loss[loss=2.903, ArTop10Accuracy=0.7523, over 10266.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7636, over 11651.31 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 11:03:51,218 INFO [trainer.py:765] (7/8) Epoch 10, batch 900, train_loss[loss=2.735, ArTop10Accuracy=0.7816, over 13029.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7653, over 11691.04 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:05:06,357 INFO [trainer.py:765] (7/8) Epoch 10, batch 1000, train_loss[loss=2.796, ArTop10Accuracy=0.767, over 12981.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7644, over 11907.49 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 11:06:21,730 INFO [trainer.py:765] (7/8) Epoch 10, batch 1100, train_loss[loss=2.942, ArTop10Accuracy=0.7401, over 13641.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7638, over 11975.93 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 11:07:34,778 INFO [trainer.py:765] (7/8) Epoch 10, batch 1200, train_loss[loss=2.959, ArTop10Accuracy=0.7386, over 12051.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7635, over 11872.57 frames. ], batch size: 103, lr: 1.16e-02 +2024-08-06 11:08:34,013 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:10:29,961 INFO [trainer.py:765] (7/8) Epoch 11, batch 100, train_loss[loss=2.832, ArTop10Accuracy=0.7628, over 14784.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.766, over 4757.71 frames. ], batch size: 63, lr: 1.10e-02 +2024-08-06 11:12:04,680 INFO [trainer.py:765] (7/8) Epoch 11, batch 200, train_loss[loss=2.848, ArTop10Accuracy=0.7591, over 13668.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7679, over 7745.12 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 11:12:22,833 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 9.884e+01 1.240e+02 1.333e+02 1.457e+02 6.939e+02, threshold=2.667e+02, percent-clipped=0.1 +2024-08-06 11:13:31,557 INFO [trainer.py:765] (7/8) Epoch 11, batch 300, train_loss[loss=2.783, ArTop10Accuracy=0.7766, over 14295.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7693, over 9379.76 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 11:15:03,276 INFO [trainer.py:765] (7/8) Epoch 11, batch 400, train_loss[loss=2.744, ArTop10Accuracy=0.7853, over 10455.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7697, over 10293.17 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 11:16:29,644 INFO [trainer.py:765] (7/8) Epoch 11, batch 500, train_loss[loss=2.775, ArTop10Accuracy=0.7748, over 12375.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7703, over 10869.57 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 11:18:00,524 INFO [trainer.py:765] (7/8) Epoch 11, batch 600, train_loss[loss=2.686, ArTop10Accuracy=0.789, over 11559.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7701, over 11358.60 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 11:19:34,519 INFO [trainer.py:765] (7/8) Epoch 11, batch 700, train_loss[loss=2.631, ArTop10Accuracy=0.8006, over 9285.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7687, over 11492.14 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 11:20:55,489 INFO [trainer.py:765] (7/8) Epoch 11, batch 800, train_loss[loss=2.737, ArTop10Accuracy=0.7849, over 10227.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7675, over 11631.71 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 11:22:13,711 INFO [trainer.py:765] (7/8) Epoch 11, batch 900, train_loss[loss=2.9, ArTop10Accuracy=0.7485, over 13065.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7683, over 11678.33 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:23:31,805 INFO [trainer.py:765] (7/8) Epoch 11, batch 1000, train_loss[loss=2.812, ArTop10Accuracy=0.767, over 13032.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7674, over 11884.80 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 11:24:46,908 INFO [trainer.py:765] (7/8) Epoch 11, batch 1100, train_loss[loss=2.843, ArTop10Accuracy=0.7617, over 13743.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7661, over 11952.21 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 11:26:00,740 INFO [trainer.py:765] (7/8) Epoch 11, batch 1200, train_loss[loss=2.971, ArTop10Accuracy=0.7319, over 11898.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7654, over 11844.39 frames. ], batch size: 101, lr: 1.06e-02 +2024-08-06 11:26:15,853 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:26:25,556 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=2.831, ArTop10Accuracy=0.7643, over 1827537.00 frames. +2024-08-06 11:26:25,556 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 11:26:26,191 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.251e+02 1.335e+02 1.441e+02 2.942e+02, threshold=2.669e+02, percent-clipped=0.1 +2024-08-06 11:27:09,788 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:29:03,457 INFO [trainer.py:765] (7/8) Epoch 12, batch 100, train_loss[loss=2.906, ArTop10Accuracy=0.7525, over 14562.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7693, over 4755.02 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 11:30:30,680 INFO [trainer.py:765] (7/8) Epoch 12, batch 200, train_loss[loss=2.829, ArTop10Accuracy=0.7614, over 13722.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.771, over 7750.22 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 11:31:57,661 INFO [trainer.py:765] (7/8) Epoch 12, batch 300, train_loss[loss=2.844, ArTop10Accuracy=0.7598, over 14127.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7709, over 9370.85 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 11:33:30,744 INFO [trainer.py:765] (7/8) Epoch 12, batch 400, train_loss[loss=2.777, ArTop10Accuracy=0.7773, over 10335.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7714, over 10290.92 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 11:34:55,741 INFO [trainer.py:765] (7/8) Epoch 12, batch 500, train_loss[loss=2.726, ArTop10Accuracy=0.7936, over 12180.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7728, over 10849.09 frames. ], batch size: 22, lr: 1.00e-02 +2024-08-06 11:36:29,367 INFO [trainer.py:765] (7/8) Epoch 12, batch 600, train_loss[loss=2.77, ArTop10Accuracy=0.7734, over 11346.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7714, over 11371.49 frames. ], batch size: 18, lr: 9.97e-03 +2024-08-06 11:38:00,350 INFO [trainer.py:765] (7/8) Epoch 12, batch 700, train_loss[loss=2.692, ArTop10Accuracy=0.7974, over 9297.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7696, over 11504.84 frames. ], batch size: 11, lr: 9.93e-03 +2024-08-06 11:39:23,617 INFO [trainer.py:765] (7/8) Epoch 12, batch 800, train_loss[loss=2.756, ArTop10Accuracy=0.7772, over 10002.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7691, over 11641.67 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 11:40:39,895 INFO [trainer.py:765] (7/8) Epoch 12, batch 900, train_loss[loss=2.87, ArTop10Accuracy=0.7533, over 13086.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7701, over 11681.88 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 11:41:14,001 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.041e+02 1.248e+02 1.348e+02 1.459e+02 5.540e+02, threshold=2.695e+02, percent-clipped=0.3 +2024-08-06 11:41:56,195 INFO [trainer.py:765] (7/8) Epoch 12, batch 1000, train_loss[loss=2.782, ArTop10Accuracy=0.7761, over 12906.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7696, over 11878.21 frames. ], batch size: 27, lr: 9.85e-03 +2024-08-06 11:43:14,326 INFO [trainer.py:765] (7/8) Epoch 12, batch 1100, train_loss[loss=2.795, ArTop10Accuracy=0.7758, over 13584.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7688, over 11961.32 frames. ], batch size: 34, lr: 9.82e-03 +2024-08-06 11:44:26,162 INFO [trainer.py:765] (7/8) Epoch 12, batch 1200, train_loss[loss=2.877, ArTop10Accuracy=0.751, over 12174.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7697, over 11871.83 frames. ], batch size: 101, lr: 9.79e-03 +2024-08-06 11:45:26,840 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:47:26,608 INFO [trainer.py:765] (7/8) Epoch 13, batch 100, train_loss[loss=2.856, ArTop10Accuracy=0.7638, over 14538.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7722, over 4752.84 frames. ], batch size: 63, lr: 9.37e-03 +2024-08-06 11:48:54,784 INFO [trainer.py:765] (7/8) Epoch 13, batch 200, train_loss[loss=2.783, ArTop10Accuracy=0.7744, over 13587.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7726, over 7712.35 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 11:50:20,521 INFO [trainer.py:765] (7/8) Epoch 13, batch 300, train_loss[loss=2.848, ArTop10Accuracy=0.7612, over 14127.00 frames. ], tot_loss[loss=2.78, ArTop10Accuracy=0.7738, over 9346.13 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 11:51:48,770 INFO [trainer.py:765] (7/8) Epoch 13, batch 400, train_loss[loss=2.655, ArTop10Accuracy=0.7974, over 10497.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7744, over 10273.88 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 11:53:13,413 INFO [trainer.py:765] (7/8) Epoch 13, batch 500, train_loss[loss=2.696, ArTop10Accuracy=0.7907, over 12237.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7752, over 10830.80 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 11:54:52,228 INFO [trainer.py:765] (7/8) Epoch 13, batch 600, train_loss[loss=2.751, ArTop10Accuracy=0.7839, over 11358.00 frames. ], tot_loss[loss=2.776, ArTop10Accuracy=0.7748, over 11350.36 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 11:55:47,087 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:55:56,834 INFO [trainer.py:811] (7/8) Epoch 13, validation: loss=2.824, ArTop10Accuracy=0.7662, over 1827537.00 frames. +2024-08-06 11:55:56,835 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 11:55:57,718 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.064e+02 1.255e+02 1.343e+02 1.452e+02 4.888e+02, threshold=2.687e+02, percent-clipped=0.1 +2024-08-06 11:56:28,470 INFO [trainer.py:765] (7/8) Epoch 13, batch 700, train_loss[loss=2.716, ArTop10Accuracy=0.7877, over 9372.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7744, over 11502.85 frames. ], batch size: 11, lr: 9.20e-03 +2024-08-06 11:57:46,688 INFO [trainer.py:765] (7/8) Epoch 13, batch 800, train_loss[loss=2.765, ArTop10Accuracy=0.7779, over 9270.00 frames. ], tot_loss[loss=2.781, ArTop10Accuracy=0.7737, over 11637.14 frames. ], batch size: 11, lr: 9.18e-03 +2024-08-06 11:59:03,294 INFO [trainer.py:765] (7/8) Epoch 13, batch 900, train_loss[loss=2.746, ArTop10Accuracy=0.7813, over 12927.00 frames. ], tot_loss[loss=2.779, ArTop10Accuracy=0.7742, over 11686.24 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 12:00:19,179 INFO [trainer.py:765] (7/8) Epoch 13, batch 1000, train_loss[loss=2.763, ArTop10Accuracy=0.784, over 12996.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7727, over 11882.38 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 12:01:34,888 INFO [trainer.py:765] (7/8) Epoch 13, batch 1100, train_loss[loss=2.788, ArTop10Accuracy=0.7706, over 13569.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7711, over 11950.05 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 12:02:48,668 INFO [trainer.py:765] (7/8) Epoch 13, batch 1200, train_loss[loss=2.882, ArTop10Accuracy=0.7557, over 12348.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7714, over 11841.55 frames. ], batch size: 101, lr: 9.08e-03 +2024-08-06 12:03:48,262 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:05:45,342 INFO [trainer.py:765] (7/8) Epoch 14, batch 100, train_loss[loss=2.801, ArTop10Accuracy=0.7691, over 14286.00 frames. ], tot_loss[loss=2.774, ArTop10Accuracy=0.7744, over 4789.12 frames. ], batch size: 62, lr: 8.71e-03 +2024-08-06 12:07:16,612 INFO [trainer.py:765] (7/8) Epoch 14, batch 200, train_loss[loss=2.759, ArTop10Accuracy=0.7766, over 13548.00 frames. ], tot_loss[loss=2.77, ArTop10Accuracy=0.7754, over 7763.87 frames. 
], batch size: 34, lr: 8.69e-03 +2024-08-06 12:08:44,319 INFO [trainer.py:765] (7/8) Epoch 14, batch 300, train_loss[loss=2.798, ArTop10Accuracy=0.7739, over 14325.00 frames. ], tot_loss[loss=2.763, ArTop10Accuracy=0.7772, over 9404.43 frames. ], batch size: 45, lr: 8.66e-03 +2024-08-06 12:10:01,138 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.072e+02 1.266e+02 1.374e+02 1.483e+02 6.480e+02, threshold=2.748e+02, percent-clipped=0.2 +2024-08-06 12:10:10,233 INFO [trainer.py:765] (7/8) Epoch 14, batch 400, train_loss[loss=2.795, ArTop10Accuracy=0.7679, over 10785.00 frames. ], tot_loss[loss=2.764, ArTop10Accuracy=0.777, over 10299.80 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 12:11:36,157 INFO [trainer.py:765] (7/8) Epoch 14, batch 500, train_loss[loss=2.688, ArTop10Accuracy=0.7904, over 12291.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7783, over 10862.13 frames. ], batch size: 22, lr: 8.62e-03 +2024-08-06 12:13:05,999 INFO [trainer.py:765] (7/8) Epoch 14, batch 600, train_loss[loss=2.724, ArTop10Accuracy=0.7831, over 11988.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7775, over 11389.76 frames. ], batch size: 19, lr: 8.59e-03 +2024-08-06 12:14:38,559 INFO [trainer.py:765] (7/8) Epoch 14, batch 700, train_loss[loss=2.728, ArTop10Accuracy=0.7858, over 10293.00 frames. ], tot_loss[loss=2.765, ArTop10Accuracy=0.7771, over 11532.92 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 12:15:58,076 INFO [trainer.py:765] (7/8) Epoch 14, batch 800, train_loss[loss=2.684, ArTop10Accuracy=0.7962, over 9432.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7758, over 11619.66 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 12:17:12,872 INFO [trainer.py:765] (7/8) Epoch 14, batch 900, train_loss[loss=2.82, ArTop10Accuracy=0.7688, over 12945.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7764, over 11694.97 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 12:18:29,621 INFO [trainer.py:765] (7/8) Epoch 14, batch 1000, train_loss[loss=2.708, ArTop10Accuracy=0.791, over 13026.00 frames. ], tot_loss[loss=2.771, ArTop10Accuracy=0.7758, over 11882.66 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 12:19:45,383 INFO [trainer.py:765] (7/8) Epoch 14, batch 1100, train_loss[loss=2.799, ArTop10Accuracy=0.7704, over 13656.00 frames. ], tot_loss[loss=2.778, ArTop10Accuracy=0.7744, over 11972.38 frames. ], batch size: 35, lr: 8.48e-03 +2024-08-06 12:20:59,284 INFO [trainer.py:765] (7/8) Epoch 14, batch 1200, train_loss[loss=2.889, ArTop10Accuracy=0.7537, over 11946.00 frames. ], tot_loss[loss=2.777, ArTop10Accuracy=0.7747, over 11883.44 frames. ], batch size: 101, lr: 8.46e-03 +2024-08-06 12:21:58,392 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:23:51,969 INFO [trainer.py:765] (7/8) Epoch 15, batch 100, train_loss[loss=2.843, ArTop10Accuracy=0.7642, over 14451.00 frames. ], tot_loss[loss=2.762, ArTop10Accuracy=0.7777, over 4748.98 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 12:24:00,606 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:24:10,290 INFO [trainer.py:811] (7/8) Epoch 15, validation: loss=2.819, ArTop10Accuracy=0.7675, over 1827537.00 frames. 
+2024-08-06 12:24:10,291 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 12:24:11,100 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.284e+02 1.371e+02 1.488e+02 4.667e+02, threshold=2.743e+02, percent-clipped=0.2 +2024-08-06 12:25:29,995 INFO [trainer.py:765] (7/8) Epoch 15, batch 200, train_loss[loss=2.79, ArTop10Accuracy=0.7691, over 13515.00 frames. ], tot_loss[loss=2.756, ArTop10Accuracy=0.7787, over 7748.40 frames. ], batch size: 34, lr: 8.12e-03 +2024-08-06 12:26:58,700 INFO [trainer.py:765] (7/8) Epoch 15, batch 300, train_loss[loss=2.801, ArTop10Accuracy=0.7692, over 13812.00 frames. ], tot_loss[loss=2.749, ArTop10Accuracy=0.78, over 9360.32 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 12:28:28,541 INFO [trainer.py:765] (7/8) Epoch 15, batch 400, train_loss[loss=2.615, ArTop10Accuracy=0.8103, over 11025.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7795, over 10292.66 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 12:29:54,040 INFO [trainer.py:765] (7/8) Epoch 15, batch 500, train_loss[loss=2.716, ArTop10Accuracy=0.7863, over 12057.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7803, over 10846.17 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 12:31:23,300 INFO [trainer.py:765] (7/8) Epoch 15, batch 600, train_loss[loss=2.654, ArTop10Accuracy=0.7999, over 11283.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7792, over 11370.72 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 12:32:53,182 INFO [trainer.py:765] (7/8) Epoch 15, batch 700, train_loss[loss=2.632, ArTop10Accuracy=0.8045, over 10308.00 frames. ], tot_loss[loss=2.757, ArTop10Accuracy=0.7785, over 11518.17 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 12:34:18,261 INFO [trainer.py:765] (7/8) Epoch 15, batch 800, train_loss[loss=2.534, ArTop10Accuracy=0.8238, over 10224.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 11628.39 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 12:35:34,733 INFO [trainer.py:765] (7/8) Epoch 15, batch 900, train_loss[loss=2.727, ArTop10Accuracy=0.786, over 12687.00 frames. ], tot_loss[loss=2.754, ArTop10Accuracy=0.7792, over 11667.32 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 12:36:50,547 INFO [trainer.py:765] (7/8) Epoch 15, batch 1000, train_loss[loss=2.801, ArTop10Accuracy=0.7699, over 12762.00 frames. ], tot_loss[loss=2.759, ArTop10Accuracy=0.7781, over 11870.70 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 12:38:05,188 INFO [trainer.py:765] (7/8) Epoch 15, batch 1100, train_loss[loss=2.793, ArTop10Accuracy=0.7714, over 13743.00 frames. ], tot_loss[loss=2.767, ArTop10Accuracy=0.7765, over 11953.51 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 12:38:12,847 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.080e+02 1.293e+02 1.379e+02 1.467e+02 2.824e+02, threshold=2.759e+02, percent-clipped=0.1 +2024-08-06 12:39:18,795 INFO [trainer.py:765] (7/8) Epoch 15, batch 1200, train_loss[loss=2.905, ArTop10Accuracy=0.7478, over 12228.00 frames. ], tot_loss[loss=2.766, ArTop10Accuracy=0.7768, over 11851.53 frames. ], batch size: 101, lr: 7.91e-03 +2024-08-06 12:40:18,896 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:42:17,627 INFO [trainer.py:765] (7/8) Epoch 16, batch 100, train_loss[loss=2.832, ArTop10Accuracy=0.7641, over 14472.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7805, over 4759.95 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 12:43:49,572 INFO [trainer.py:765] (7/8) Epoch 16, batch 200, train_loss[loss=2.744, ArTop10Accuracy=0.7798, over 13692.00 frames. ], tot_loss[loss=2.742, ArTop10Accuracy=0.7813, over 7743.89 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 12:45:18,508 INFO [trainer.py:765] (7/8) Epoch 16, batch 300, train_loss[loss=2.813, ArTop10Accuracy=0.7684, over 14073.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7815, over 9383.12 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 12:46:45,215 INFO [trainer.py:765] (7/8) Epoch 16, batch 400, train_loss[loss=2.627, ArTop10Accuracy=0.8022, over 10203.00 frames. ], tot_loss[loss=2.737, ArTop10Accuracy=0.7819, over 10285.65 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 12:48:16,319 INFO [trainer.py:765] (7/8) Epoch 16, batch 500, train_loss[loss=2.705, ArTop10Accuracy=0.7881, over 12114.00 frames. ], tot_loss[loss=2.733, ArTop10Accuracy=0.7826, over 10840.99 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 12:49:46,651 INFO [trainer.py:765] (7/8) Epoch 16, batch 600, train_loss[loss=2.678, ArTop10Accuracy=0.7914, over 11430.00 frames. ], tot_loss[loss=2.738, ArTop10Accuracy=0.7818, over 11353.37 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 12:51:23,687 INFO [trainer.py:765] (7/8) Epoch 16, batch 700, train_loss[loss=2.576, ArTop10Accuracy=0.8136, over 10032.00 frames. ], tot_loss[loss=2.741, ArTop10Accuracy=0.7812, over 11511.29 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 12:52:43,507 INFO [trainer.py:765] (7/8) Epoch 16, batch 800, train_loss[loss=2.719, ArTop10Accuracy=0.7814, over 9390.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7794, over 11637.75 frames. ], batch size: 11, lr: 7.51e-03 +2024-08-06 12:53:06,022 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:53:15,499 INFO [trainer.py:811] (7/8) Epoch 16, validation: loss=2.816, ArTop10Accuracy=0.7678, over 1827537.00 frames. +2024-08-06 12:53:15,499 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 12:53:16,192 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.112e+02 1.291e+02 1.391e+02 1.487e+02 3.459e+02, threshold=2.783e+02, percent-clipped=0.1 +2024-08-06 12:54:06,487 INFO [trainer.py:765] (7/8) Epoch 16, batch 900, train_loss[loss=2.729, ArTop10Accuracy=0.7871, over 12846.00 frames. ], tot_loss[loss=2.75, ArTop10Accuracy=0.7795, over 11697.95 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 12:55:19,797 INFO [trainer.py:765] (7/8) Epoch 16, batch 1000, train_loss[loss=2.687, ArTop10Accuracy=0.7908, over 12930.00 frames. ], tot_loss[loss=2.751, ArTop10Accuracy=0.7794, over 11884.37 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 12:56:33,171 INFO [trainer.py:765] (7/8) Epoch 16, batch 1100, train_loss[loss=2.778, ArTop10Accuracy=0.7749, over 13665.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7782, over 11949.15 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 12:57:48,491 INFO [trainer.py:765] (7/8) Epoch 16, batch 1200, train_loss[loss=2.839, ArTop10Accuracy=0.7655, over 12969.00 frames. ], tot_loss[loss=2.758, ArTop10Accuracy=0.7781, over 11864.36 frames. ], batch size: 103, lr: 7.44e-03 +2024-08-06 12:58:48,019 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:00:47,906 INFO [trainer.py:765] (7/8) Epoch 17, batch 100, train_loss[loss=2.792, ArTop10Accuracy=0.7732, over 14166.00 frames. ], tot_loss[loss=2.743, ArTop10Accuracy=0.7807, over 4760.01 frames. 
], batch size: 62, lr: 7.18e-03 +2024-08-06 13:02:19,308 INFO [trainer.py:765] (7/8) Epoch 17, batch 200, train_loss[loss=2.814, ArTop10Accuracy=0.7661, over 13584.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7825, over 7731.47 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 13:03:45,523 INFO [trainer.py:765] (7/8) Epoch 17, batch 300, train_loss[loss=2.769, ArTop10Accuracy=0.7733, over 14361.00 frames. ], tot_loss[loss=2.731, ArTop10Accuracy=0.7829, over 9385.16 frames. ], batch size: 45, lr: 7.15e-03 +2024-08-06 13:05:21,768 INFO [trainer.py:765] (7/8) Epoch 17, batch 400, train_loss[loss=2.632, ArTop10Accuracy=0.8014, over 10311.00 frames. ], tot_loss[loss=2.728, ArTop10Accuracy=0.7838, over 10317.90 frames. ], batch size: 14, lr: 7.14e-03 +2024-08-06 13:06:47,027 INFO [trainer.py:765] (7/8) Epoch 17, batch 500, train_loss[loss=2.792, ArTop10Accuracy=0.7735, over 12228.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7845, over 10862.57 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 13:07:39,886 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.140e+02 1.293e+02 1.386e+02 1.488e+02 3.253e+02, threshold=2.772e+02, percent-clipped=0.1 +2024-08-06 13:08:22,694 INFO [trainer.py:765] (7/8) Epoch 17, batch 600, train_loss[loss=2.656, ArTop10Accuracy=0.8009, over 11367.00 frames. ], tot_loss[loss=2.726, ArTop10Accuracy=0.7845, over 11383.18 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 13:09:54,842 INFO [trainer.py:765] (7/8) Epoch 17, batch 700, train_loss[loss=2.583, ArTop10Accuracy=0.8144, over 9984.00 frames. ], tot_loss[loss=2.732, ArTop10Accuracy=0.7831, over 11520.02 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 13:11:19,487 INFO [trainer.py:765] (7/8) Epoch 17, batch 800, train_loss[loss=2.614, ArTop10Accuracy=0.8099, over 9345.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7823, over 11651.14 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 13:12:35,676 INFO [trainer.py:765] (7/8) Epoch 17, batch 900, train_loss[loss=2.75, ArTop10Accuracy=0.7803, over 12879.00 frames. ], tot_loss[loss=2.736, ArTop10Accuracy=0.7824, over 11692.53 frames. ], batch size: 27, lr: 7.06e-03 +2024-08-06 13:13:53,068 INFO [trainer.py:765] (7/8) Epoch 17, batch 1000, train_loss[loss=2.727, ArTop10Accuracy=0.7848, over 12855.00 frames. ], tot_loss[loss=2.744, ArTop10Accuracy=0.7809, over 11882.04 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 13:15:08,492 INFO [trainer.py:765] (7/8) Epoch 17, batch 1100, train_loss[loss=2.74, ArTop10Accuracy=0.7812, over 14052.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7804, over 11950.75 frames. ], batch size: 35, lr: 7.02e-03 +2024-08-06 13:16:22,394 INFO [trainer.py:765] (7/8) Epoch 17, batch 1200, train_loss[loss=2.863, ArTop10Accuracy=0.7594, over 12015.00 frames. ], tot_loss[loss=2.747, ArTop10Accuracy=0.7804, over 11855.95 frames. ], batch size: 101, lr: 7.01e-03 +2024-08-06 13:17:21,213 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:19:16,001 INFO [trainer.py:765] (7/8) Epoch 18, batch 100, train_loss[loss=2.784, ArTop10Accuracy=0.7674, over 14079.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7844, over 4751.48 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 13:20:46,608 INFO [trainer.py:765] (7/8) Epoch 18, batch 200, train_loss[loss=2.649, ArTop10Accuracy=0.7999, over 13605.00 frames. ], tot_loss[loss=2.722, ArTop10Accuracy=0.7849, over 7735.17 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 13:21:55,110 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:22:04,751 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=2.817, ArTop10Accuracy=0.768, over 1827537.00 frames. +2024-08-06 13:22:04,752 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 13:22:05,480 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.131e+02 1.323e+02 1.409e+02 1.514e+02 3.209e+02, threshold=2.818e+02, percent-clipped=0.1 +2024-08-06 13:22:26,587 INFO [trainer.py:765] (7/8) Epoch 18, batch 300, train_loss[loss=2.752, ArTop10Accuracy=0.7807, over 14289.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7863, over 9361.90 frames. ], batch size: 44, lr: 6.76e-03 +2024-08-06 13:23:57,938 INFO [trainer.py:765] (7/8) Epoch 18, batch 400, train_loss[loss=2.653, ArTop10Accuracy=0.7984, over 10791.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.7863, over 10296.98 frames. ], batch size: 15, lr: 6.74e-03 +2024-08-06 13:25:34,019 INFO [trainer.py:765] (7/8) Epoch 18, batch 500, train_loss[loss=2.708, ArTop10Accuracy=0.7846, over 12288.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7865, over 10862.32 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 13:27:00,640 INFO [trainer.py:765] (7/8) Epoch 18, batch 600, train_loss[loss=2.674, ArTop10Accuracy=0.7969, over 11604.00 frames. ], tot_loss[loss=2.719, ArTop10Accuracy=0.7857, over 11368.16 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 13:28:33,590 INFO [trainer.py:765] (7/8) Epoch 18, batch 700, train_loss[loss=2.703, ArTop10Accuracy=0.7906, over 10236.00 frames. ], tot_loss[loss=2.724, ArTop10Accuracy=0.7847, over 11505.06 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 13:29:54,993 INFO [trainer.py:765] (7/8) Epoch 18, batch 800, train_loss[loss=2.754, ArTop10Accuracy=0.7758, over 10239.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7845, over 11629.35 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 13:31:12,525 INFO [trainer.py:765] (7/8) Epoch 18, batch 900, train_loss[loss=2.723, ArTop10Accuracy=0.7859, over 12897.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7852, over 11685.87 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 13:32:26,558 INFO [trainer.py:765] (7/8) Epoch 18, batch 1000, train_loss[loss=2.771, ArTop10Accuracy=0.7752, over 12876.00 frames. ], tot_loss[loss=2.727, ArTop10Accuracy=0.784, over 11871.23 frames. ], batch size: 27, lr: 6.66e-03 +2024-08-06 13:33:41,504 INFO [trainer.py:765] (7/8) Epoch 18, batch 1100, train_loss[loss=2.757, ArTop10Accuracy=0.7748, over 13779.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7827, over 11937.42 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 13:34:54,682 INFO [trainer.py:765] (7/8) Epoch 18, batch 1200, train_loss[loss=2.863, ArTop10Accuracy=0.7601, over 12321.00 frames. ], tot_loss[loss=2.734, ArTop10Accuracy=0.7828, over 11837.69 frames. ], batch size: 101, lr: 6.63e-03 +2024-08-06 13:35:51,070 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.124e+02 1.340e+02 1.433e+02 1.533e+02 2.444e+02, threshold=2.867e+02, percent-clipped=0.0 +2024-08-06 13:35:54,176 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:37:48,630 INFO [trainer.py:765] (7/8) Epoch 19, batch 100, train_loss[loss=2.798, ArTop10Accuracy=0.7683, over 14376.00 frames. ], tot_loss[loss=2.723, ArTop10Accuracy=0.7846, over 4770.95 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 13:39:23,263 INFO [trainer.py:765] (7/8) Epoch 19, batch 200, train_loss[loss=2.726, ArTop10Accuracy=0.7834, over 13749.00 frames. ], tot_loss[loss=2.721, ArTop10Accuracy=0.7848, over 7745.36 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 13:40:48,366 INFO [trainer.py:765] (7/8) Epoch 19, batch 300, train_loss[loss=2.731, ArTop10Accuracy=0.7845, over 14517.00 frames. ], tot_loss[loss=2.713, ArTop10Accuracy=0.7862, over 9373.26 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 13:42:21,074 INFO [trainer.py:765] (7/8) Epoch 19, batch 400, train_loss[loss=2.731, ArTop10Accuracy=0.7861, over 10323.00 frames. ], tot_loss[loss=2.705, ArTop10Accuracy=0.7879, over 10293.26 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 13:43:44,961 INFO [trainer.py:765] (7/8) Epoch 19, batch 500, train_loss[loss=2.731, ArTop10Accuracy=0.7784, over 12534.00 frames. ], tot_loss[loss=2.7, ArTop10Accuracy=0.789, over 10849.95 frames. ], batch size: 23, lr: 6.37e-03 +2024-08-06 13:45:16,688 INFO [trainer.py:765] (7/8) Epoch 19, batch 600, train_loss[loss=2.637, ArTop10Accuracy=0.8015, over 11577.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7885, over 11382.15 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 13:46:48,330 INFO [trainer.py:765] (7/8) Epoch 19, batch 700, train_loss[loss=2.646, ArTop10Accuracy=0.7997, over 10125.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.787, over 11524.94 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 13:48:11,890 INFO [trainer.py:765] (7/8) Epoch 19, batch 800, train_loss[loss=2.537, ArTop10Accuracy=0.8126, over 10260.00 frames. ], tot_loss[loss=2.715, ArTop10Accuracy=0.786, over 11639.76 frames. ], batch size: 12, lr: 6.34e-03 +2024-08-06 13:49:27,268 INFO [trainer.py:765] (7/8) Epoch 19, batch 900, train_loss[loss=2.684, ArTop10Accuracy=0.7954, over 13089.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7871, over 11707.01 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 13:50:40,660 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:50:50,536 INFO [trainer.py:811] (7/8) Epoch 19, validation: loss=2.818, ArTop10Accuracy=0.7679, over 1827537.00 frames. +2024-08-06 13:50:50,537 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33001MB +2024-08-06 13:50:51,497 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.161e+02 1.371e+02 1.455e+02 1.550e+02 3.697e+02, threshold=2.909e+02, percent-clipped=0.2 +2024-08-06 13:50:52,921 INFO [trainer.py:765] (7/8) Epoch 19, batch 1000, train_loss[loss=2.774, ArTop10Accuracy=0.773, over 12831.00 frames. ], tot_loss[loss=2.716, ArTop10Accuracy=0.7858, over 11889.69 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 13:52:08,274 INFO [trainer.py:765] (7/8) Epoch 19, batch 1100, train_loss[loss=2.682, ArTop10Accuracy=0.792, over 13770.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.784, over 11949.85 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 13:53:22,320 INFO [trainer.py:765] (7/8) Epoch 19, batch 1200, train_loss[loss=2.872, ArTop10Accuracy=0.7564, over 12657.00 frames. ], tot_loss[loss=2.725, ArTop10Accuracy=0.7842, over 11853.51 frames. ], batch size: 103, lr: 6.28e-03 +2024-08-06 13:54:21,906 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:56:12,912 INFO [trainer.py:765] (7/8) Epoch 20, batch 100, train_loss[loss=2.794, ArTop10Accuracy=0.7677, over 14673.00 frames. ], tot_loss[loss=2.706, ArTop10Accuracy=0.7875, over 4740.81 frames. 
], batch size: 62, lr: 6.10e-03 +2024-08-06 13:57:42,501 INFO [trainer.py:765] (7/8) Epoch 20, batch 200, train_loss[loss=2.691, ArTop10Accuracy=0.7936, over 13551.00 frames. ], tot_loss[loss=2.704, ArTop10Accuracy=0.788, over 7749.97 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 13:59:15,436 INFO [trainer.py:765] (7/8) Epoch 20, batch 300, train_loss[loss=2.764, ArTop10Accuracy=0.7777, over 13926.00 frames. ], tot_loss[loss=2.696, ArTop10Accuracy=0.7898, over 9400.91 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 14:00:44,362 INFO [trainer.py:765] (7/8) Epoch 20, batch 400, train_loss[loss=2.598, ArTop10Accuracy=0.8109, over 10725.00 frames. ], tot_loss[loss=2.696, ArTop10Accuracy=0.7898, over 10319.38 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 14:02:14,860 INFO [trainer.py:765] (7/8) Epoch 20, batch 500, train_loss[loss=2.713, ArTop10Accuracy=0.7871, over 12249.00 frames. ], tot_loss[loss=2.692, ArTop10Accuracy=0.7905, over 10864.98 frames. ], batch size: 22, lr: 6.06e-03 +2024-08-06 14:03:40,862 INFO [trainer.py:765] (7/8) Epoch 20, batch 600, train_loss[loss=2.639, ArTop10Accuracy=0.8041, over 11127.00 frames. ], tot_loss[loss=2.694, ArTop10Accuracy=0.79, over 11364.86 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 14:05:13,870 INFO [trainer.py:765] (7/8) Epoch 20, batch 700, train_loss[loss=2.727, ArTop10Accuracy=0.7852, over 10017.00 frames. ], tot_loss[loss=2.701, ArTop10Accuracy=0.7889, over 11514.99 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 14:05:30,798 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.180e+02 1.365e+02 1.456e+02 1.550e+02 3.525e+02, threshold=2.913e+02, percent-clipped=0.1 +2024-08-06 14:06:34,515 INFO [trainer.py:765] (7/8) Epoch 20, batch 800, train_loss[loss=2.786, ArTop10Accuracy=0.7703, over 10329.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7884, over 11642.78 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 14:07:50,950 INFO [trainer.py:765] (7/8) Epoch 20, batch 900, train_loss[loss=2.812, ArTop10Accuracy=0.7704, over 12780.00 frames. ], tot_loss[loss=2.703, ArTop10Accuracy=0.7884, over 11670.27 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 14:09:07,180 INFO [trainer.py:765] (7/8) Epoch 20, batch 1000, train_loss[loss=2.698, ArTop10Accuracy=0.7897, over 12972.00 frames. ], tot_loss[loss=2.71, ArTop10Accuracy=0.7871, over 11868.65 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 14:10:21,216 INFO [trainer.py:765] (7/8) Epoch 20, batch 1100, train_loss[loss=2.704, ArTop10Accuracy=0.7918, over 13494.00 frames. ], tot_loss[loss=2.717, ArTop10Accuracy=0.7857, over 11953.96 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 14:11:37,819 INFO [trainer.py:765] (7/8) Epoch 20, batch 1200, train_loss[loss=2.794, ArTop10Accuracy=0.7705, over 11277.00 frames. ], tot_loss[loss=2.72, ArTop10Accuracy=0.7853, over 11847.41 frames. ], batch size: 101, lr: 5.98e-03 +2024-08-06 14:12:37,438 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 14:12:37,442 INFO [trainer.py:1069] (7/8) Done! 
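Every per-batch line in the log above (and in the stage-2 log that follows) has one fixed shape, e.g. "Epoch 6, batch 200, train_loss[loss=..., ArTop10Accuracy=..., over N frames. ], tot_loss[...], batch size: B, lr: L", so loss, Top10Accuracy, and learning-rate curves can be recovered from the text alone. Below is a minimal, hypothetical parsing sketch in Python; the script name, regular expression, and output format are illustrative assumptions and are not part of icefall or these logs.

#!/usr/bin/env python3
# summarize_valle_logs.py -- hypothetical helper (not part of icefall):
# pull (epoch, batch, tot_loss, Top10Accuracy, lr) out of log lines shaped like
#   Epoch 6, batch 200, train_loss[loss=2.939, ArTop10Accuracy=0.7429, over 13713.00 frames. ],
#   tot_loss[loss=2.942, ArTop10Accuracy=0.7432, over 7740.92 frames. ], batch size: 34, lr: 1.84e-02
import re
import sys
from pathlib import Path

# (?:Ar|Nar) covers both the stage-1 AR metric and the stage-2 NAR metric seen in these logs.
LINE_RE = re.compile(
    r"Epoch (\d+), batch (\d+), "
    r"train_loss\[loss=([\d.]+), (?:Ar|Nar)Top10Accuracy=([\d.]+).*?\], "
    r"tot_loss\[loss=([\d.]+), (?:Ar|Nar)Top10Accuracy=([\d.]+).*?\], "
    r"batch size: (\d+), lr: ([\d.eE+-]+)"
)

def parse(path: Path):
    """Return (epoch, batch, tot_loss, tot_top10_acc, lr) for every matching line in one log file."""
    rows = []
    for line in path.read_text().splitlines():
        m = LINE_RE.search(line)
        if m:
            rows.append((int(m.group(1)), int(m.group(2)),
                         float(m.group(5)), float(m.group(6)), float(m.group(8))))
    return rows

if __name__ == "__main__":
    # e.g. python summarize_valle_logs.py libritts-r/log/log-train-2024-08-06-08-02-16-0
    for name in sys.argv[1:]:
        rows = parse(Path(name))
        if not rows:
            continue
        ep, b, loss, acc, lr = rows[-1]
        print(f"{name}: {len(rows)} batch lines; last: epoch {ep} batch {b} "
              f"tot_loss={loss:.3f} Top10Acc={acc:.4f} lr={lr:.2e}")

Run against the rank-0 log of either run, this prints one summary line per file; the collected tuples are also enough to plot the smoothed tot_loss curves (stage-1 AR and stage-2 NAR) if TensorBoard event files are not at hand.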
diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-0 b/libritts-r/log/log-train-2024-08-06-14-23-41-0 new file mode 100644 index 0000000000000000000000000000000000000000..2978840b8f7720f350f271050e613925d935ff53 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-0 @@ -0,0 +1,1301 @@ +2024-08-06 14:23:41,767 INFO [trainer.py:870] (0/8) Training started +2024-08-06 14:23:41,773 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 14:23:41,773 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,773 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 14:23:42,559 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,559 INFO [checkpoint.py:112] (0/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,526 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 14:23:49,643 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 14:23:49,644 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 14:23:49,646 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 14:23:49,646 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 14:23:49,646 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,267 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 14:23:50,267 INFO [datamodule.py:367] (0/8) About to create dev dataset 
+2024-08-06 14:23:50,599 INFO [datamodule.py:388] (0/8) About to create dev dataloader +2024-08-06 14:24:38,248 INFO [trainer.py:765] (0/8) Epoch 1, batch 100, train_loss[loss=105, NarTop10Accuracy=0.02097, over 7647.00 frames. ], tot_loss[loss=73.82, NarTop10Accuracy=0.04726, over 2372.89 frames. ], batch size: 32, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (0/8) Epoch 1, batch 200, train_loss[loss=142.8, NarTop10Accuracy=0.01102, over 6753.00 frames. ], tot_loss[loss=97.51, NarTop10Accuracy=0.0428, over 3846.60 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,110 INFO [trainer.py:765] (0/8) Epoch 1, batch 300, train_loss[loss=106.3, NarTop10Accuracy=0.01929, over 6993.00 frames. ], tot_loss[loss=85.06, NarTop10Accuracy=0.04279, over 4664.29 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,483 INFO [trainer.py:765] (0/8) Epoch 1, batch 400, train_loss[loss=51.43, NarTop10Accuracy=0.01936, over 5139.00 frames. ], tot_loss[loss=67.66, NarTop10Accuracy=0.0466, over 5130.21 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,358 INFO [trainer.py:765] (0/8) Epoch 1, batch 500, train_loss[loss=14.94, NarTop10Accuracy=0.0248, over 6060.00 frames. ], tot_loss[loss=49, NarTop10Accuracy=0.04913, over 5394.70 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,001 INFO [trainer.py:765] (0/8) Epoch 1, batch 600, train_loss[loss=6.167, NarTop10Accuracy=0.1951, over 5670.00 frames. ], tot_loss[loss=33.42, NarTop10Accuracy=0.0545, over 5662.20 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,491 INFO [trainer.py:765] (0/8) Epoch 1, batch 700, train_loss[loss=6.837, NarTop10Accuracy=0.09458, over 5040.00 frames. ], tot_loss[loss=23.4, NarTop10Accuracy=0.06406, over 5731.17 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 14:28:08,833 INFO [trainer.py:765] (0/8) Epoch 1, batch 800, train_loss[loss=6.401, NarTop10Accuracy=0.1431, over 5061.00 frames. ], tot_loss[loss=17.15, NarTop10Accuracy=0.08487, over 5790.72 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,759 INFO [trainer.py:765] (0/8) Epoch 1, batch 900, train_loss[loss=5.758, NarTop10Accuracy=0.1671, over 6312.00 frames. ], tot_loss[loss=12.78, NarTop10Accuracy=0.1136, over 5810.92 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,588 INFO [trainer.py:765] (0/8) Epoch 1, batch 1000, train_loss[loss=5.851, NarTop10Accuracy=0.1673, over 6138.00 frames. ], tot_loss[loss=10.09, NarTop10Accuracy=0.1359, over 5910.81 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 14:29:42,826 INFO [trainer.py:765] (0/8) Epoch 1, batch 1100, train_loss[loss=5.707, NarTop10Accuracy=0.1984, over 6663.00 frames. ], tot_loss[loss=8.416, NarTop10Accuracy=0.1532, over 5940.25 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,470 INFO [trainer.py:765] (0/8) Epoch 1, batch 1200, train_loss[loss=5.887, NarTop10Accuracy=0.1764, over 7245.00 frames. ], tot_loss[loss=7.35, NarTop10Accuracy=0.1708, over 5937.84 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,752 INFO [trainer.py:765] (0/8) Epoch 1, batch 1300, train_loss[loss=5.249, NarTop10Accuracy=0.2838, over 5001.00 frames. ], tot_loss[loss=6.686, NarTop10Accuracy=0.1861, over 5990.56 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 14:31:18,145 INFO [trainer.py:765] (0/8) Epoch 1, batch 1400, train_loss[loss=5.694, NarTop10Accuracy=0.191, over 6123.00 frames. ], tot_loss[loss=6.256, NarTop10Accuracy=0.1972, over 6011.67 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,027 INFO [trainer.py:765] (0/8) Epoch 1, batch 1500, train_loss[loss=5.649, NarTop10Accuracy=0.206, over 6378.00 frames. ], tot_loss[loss=5.971, NarTop10Accuracy=0.2093, over 5947.78 frames. ], batch size: 51, lr: 2.94e-02 +2024-08-06 14:32:13,693 INFO [trainer.py:765] (0/8) Epoch 1, batch 1600, train_loss[loss=5.52, NarTop10Accuracy=0.226, over 7074.00 frames. ], tot_loss[loss=5.79, NarTop10Accuracy=0.2179, over 5913.02 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,199 INFO [trainer.py:765] (0/8) Epoch 1, batch 1700, train_loss[loss=5.463, NarTop10Accuracy=0.2397, over 6135.00 frames. ], tot_loss[loss=5.669, NarTop10Accuracy=0.2252, over 5903.26 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 14:33:06,500 INFO [trainer.py:765] (0/8) Epoch 1, batch 1800, train_loss[loss=5.624, NarTop10Accuracy=0.1997, over 7122.00 frames. ], tot_loss[loss=5.588, NarTop10Accuracy=0.2304, over 5955.21 frames. ], batch size: 23, lr: 2.91e-02 +2024-08-06 14:33:32,626 INFO [trainer.py:765] (0/8) Epoch 1, batch 1900, train_loss[loss=5.713, NarTop10Accuracy=0.1862, over 6579.00 frames. ], tot_loss[loss=5.513, NarTop10Accuracy=0.2396, over 6011.46 frames. ], batch size: 51, lr: 2.90e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:765] (0/8) Epoch 1, batch 2000, train_loss[loss=5.458, NarTop10Accuracy=0.2503, over 5856.00 frames. ], tot_loss[loss=5.446, NarTop10Accuracy=0.2494, over 6003.31 frames. ], batch size: 51, lr: 2.89e-02 +2024-08-06 14:33:58,017 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (0/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 26917MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,061 INFO [trainer.py:765] (0/8) Epoch 1, batch 2100, train_loss[loss=5.12, NarTop10Accuracy=0.3081, over 4809.00 frames. ], tot_loss[loss=5.388, NarTop10Accuracy=0.2592, over 5970.78 frames. ], batch size: 5, lr: 2.88e-02 +2024-08-06 14:34:57,303 INFO [trainer.py:765] (0/8) Epoch 1, batch 2200, train_loss[loss=5.504, NarTop10Accuracy=0.2395, over 7437.00 frames. ], tot_loss[loss=5.36, NarTop10Accuracy=0.2634, over 6011.58 frames. ], batch size: 33, lr: 2.87e-02 +2024-08-06 14:35:22,455 INFO [trainer.py:765] (0/8) Epoch 1, batch 2300, train_loss[loss=5.201, NarTop10Accuracy=0.2931, over 5613.00 frames. ], tot_loss[loss=5.339, NarTop10Accuracy=0.2667, over 6023.24 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,815 INFO [trainer.py:765] (0/8) Epoch 1, batch 2400, train_loss[loss=5.374, NarTop10Accuracy=0.2526, over 5205.00 frames. ], tot_loss[loss=5.282, NarTop10Accuracy=0.2775, over 5777.18 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (0/8) Epoch 1, batch 2500, train_loss[loss=5.143, NarTop10Accuracy=0.3044, over 5031.00 frames. ], tot_loss[loss=5.217, NarTop10Accuracy=0.2889, over 5500.73 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,000 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 14:36:31,003 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-1.pt +2024-08-06 14:37:29,671 INFO [trainer.py:765] (0/8) Epoch 2, batch 100, train_loss[loss=4.991, NarTop10Accuracy=0.3392, over 7191.00 frames. 
], tot_loss[loss=5.184, NarTop10Accuracy=0.2963, over 2375.63 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,016 INFO [trainer.py:765] (0/8) Epoch 2, batch 200, train_loss[loss=5.163, NarTop10Accuracy=0.3069, over 6723.00 frames. ], tot_loss[loss=5.16, NarTop10Accuracy=0.2996, over 3850.00 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,298 INFO [trainer.py:765] (0/8) Epoch 2, batch 300, train_loss[loss=5.1, NarTop10Accuracy=0.3142, over 6855.00 frames. ], tot_loss[loss=5.131, NarTop10Accuracy=0.3048, over 4655.37 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:07,000 INFO [trainer.py:765] (0/8) Epoch 2, batch 400, train_loss[loss=4.828, NarTop10Accuracy=0.3566, over 5016.00 frames. ], tot_loss[loss=5.113, NarTop10Accuracy=0.3074, over 5119.47 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,120 INFO [trainer.py:765] (0/8) Epoch 2, batch 500, train_loss[loss=4.949, NarTop10Accuracy=0.3396, over 6144.00 frames. ], tot_loss[loss=5.07, NarTop10Accuracy=0.316, over 5389.35 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,084 INFO [trainer.py:765] (0/8) Epoch 2, batch 600, train_loss[loss=4.951, NarTop10Accuracy=0.3419, over 5760.00 frames. ], tot_loss[loss=5.046, NarTop10Accuracy=0.3206, over 5665.67 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,592 INFO [trainer.py:765] (0/8) Epoch 2, batch 700, train_loss[loss=5.12, NarTop10Accuracy=0.3019, over 5169.00 frames. ], tot_loss[loss=5.033, NarTop10Accuracy=0.3227, over 5715.02 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,516 INFO [trainer.py:765] (0/8) Epoch 2, batch 800, train_loss[loss=4.982, NarTop10Accuracy=0.3311, over 4239.00 frames. ], tot_loss[loss=5.019, NarTop10Accuracy=0.3247, over 5768.80 frames. ], batch size: 5, lr: 2.69e-02 +2024-08-06 14:41:54,406 INFO [trainer.py:765] (0/8) Epoch 2, batch 900, train_loss[loss=4.866, NarTop10Accuracy=0.3503, over 6240.00 frames. ], tot_loss[loss=4.981, NarTop10Accuracy=0.3323, over 5789.80 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 14:42:23,903 INFO [trainer.py:765] (0/8) Epoch 2, batch 1000, train_loss[loss=4.773, NarTop10Accuracy=0.3753, over 6576.00 frames. ], tot_loss[loss=4.949, NarTop10Accuracy=0.3387, over 5883.22 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 14:42:56,256 INFO [trainer.py:765] (0/8) Epoch 2, batch 1100, train_loss[loss=4.971, NarTop10Accuracy=0.3313, over 6870.00 frames. ], tot_loss[loss=4.929, NarTop10Accuracy=0.3423, over 5921.81 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,189 INFO [trainer.py:765] (0/8) Epoch 2, batch 1200, train_loss[loss=4.731, NarTop10Accuracy=0.3814, over 7320.00 frames. ], tot_loss[loss=4.91, NarTop10Accuracy=0.3457, over 5918.38 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,347 INFO [trainer.py:765] (0/8) Epoch 2, batch 1300, train_loss[loss=4.799, NarTop10Accuracy=0.369, over 5079.00 frames. ], tot_loss[loss=4.866, NarTop10Accuracy=0.3539, over 5986.90 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,728 INFO [trainer.py:765] (0/8) Epoch 2, batch 1400, train_loss[loss=4.98, NarTop10Accuracy=0.3216, over 6153.00 frames. ], tot_loss[loss=4.844, NarTop10Accuracy=0.3582, over 6020.35 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,444 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (0/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. 
+2024-08-06 14:44:48,506 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27188MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (0/8) Epoch 2, batch 1500, train_loss[loss=4.809, NarTop10Accuracy=0.3668, over 6102.00 frames. ], tot_loss[loss=4.824, NarTop10Accuracy=0.3621, over 5958.65 frames. ], batch size: 51, lr: 2.60e-02 +2024-08-06 14:45:37,659 INFO [trainer.py:765] (0/8) Epoch 2, batch 1600, train_loss[loss=4.768, NarTop10Accuracy=0.3801, over 7035.00 frames. ], tot_loss[loss=4.804, NarTop10Accuracy=0.3657, over 5933.77 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (0/8) Epoch 2, batch 1700, train_loss[loss=4.818, NarTop10Accuracy=0.3555, over 6576.00 frames. ], tot_loss[loss=4.792, NarTop10Accuracy=0.368, over 5925.42 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (0/8) Epoch 2, batch 1800, train_loss[loss=4.731, NarTop10Accuracy=0.3781, over 6921.00 frames. ], tot_loss[loss=4.772, NarTop10Accuracy=0.372, over 5993.33 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (0/8) Epoch 2, batch 1900, train_loss[loss=4.704, NarTop10Accuracy=0.3845, over 5661.00 frames. ], tot_loss[loss=4.756, NarTop10Accuracy=0.3749, over 6019.38 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 14:47:23,234 INFO [trainer.py:765] (0/8) Epoch 2, batch 2000, train_loss[loss=4.906, NarTop10Accuracy=0.3549, over 6351.00 frames. ], tot_loss[loss=4.73, NarTop10Accuracy=0.3797, over 5991.27 frames. ], batch size: 53, lr: 2.54e-02 +2024-08-06 14:47:48,589 INFO [trainer.py:765] (0/8) Epoch 2, batch 2100, train_loss[loss=4.848, NarTop10Accuracy=0.3518, over 3993.00 frames. ], tot_loss[loss=4.719, NarTop10Accuracy=0.3816, over 5979.39 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (0/8) Epoch 2, batch 2200, train_loss[loss=4.635, NarTop10Accuracy=0.3985, over 7368.00 frames. ], tot_loss[loss=4.681, NarTop10Accuracy=0.389, over 6007.30 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (0/8) Epoch 2, batch 2300, train_loss[loss=4.958, NarTop10Accuracy=0.326, over 5823.00 frames. ], tot_loss[loss=4.687, NarTop10Accuracy=0.3878, over 6021.27 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,320 INFO [trainer.py:765] (0/8) Epoch 2, batch 2400, train_loss[loss=4.474, NarTop10Accuracy=0.4173, over 5052.00 frames. ], tot_loss[loss=4.647, NarTop10Accuracy=0.3955, over 5773.42 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (0/8) Epoch 2, batch 2500, train_loss[loss=4.64, NarTop10Accuracy=0.3929, over 5148.00 frames. ], tot_loss[loss=4.611, NarTop10Accuracy=0.4024, over 5463.74 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,775 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 14:49:46,779 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-2.pt +2024-08-06 14:50:51,117 INFO [trainer.py:765] (0/8) Epoch 3, batch 100, train_loss[loss=4.81, NarTop10Accuracy=0.3602, over 7140.00 frames. ], tot_loss[loss=4.586, NarTop10Accuracy=0.4079, over 2365.43 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,388 INFO [trainer.py:765] (0/8) Epoch 3, batch 200, train_loss[loss=4.805, NarTop10Accuracy=0.357, over 6777.00 frames. 
], tot_loss[loss=4.547, NarTop10Accuracy=0.4157, over 3852.19 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,954 INFO [trainer.py:765] (0/8) Epoch 3, batch 300, train_loss[loss=4.713, NarTop10Accuracy=0.3716, over 7197.00 frames. ], tot_loss[loss=4.516, NarTop10Accuracy=0.4214, over 4636.61 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (0/8) Epoch 3, batch 400, train_loss[loss=4.327, NarTop10Accuracy=0.462, over 5748.00 frames. ], tot_loss[loss=4.499, NarTop10Accuracy=0.4246, over 5104.02 frames. ], batch size: 8, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (0/8) Epoch 3, batch 500, train_loss[loss=4.365, NarTop10Accuracy=0.4534, over 6012.00 frames. ], tot_loss[loss=4.485, NarTop10Accuracy=0.4271, over 5391.19 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,551 INFO [trainer.py:765] (0/8) Epoch 3, batch 600, train_loss[loss=4.103, NarTop10Accuracy=0.507, over 5727.00 frames. ], tot_loss[loss=4.472, NarTop10Accuracy=0.43, over 5655.49 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,466 INFO [trainer.py:765] (0/8) Epoch 3, batch 700, train_loss[loss=4.268, NarTop10Accuracy=0.4737, over 5190.00 frames. ], tot_loss[loss=4.449, NarTop10Accuracy=0.4346, over 5749.35 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (0/8) Epoch 3, batch 800, train_loss[loss=4.224, NarTop10Accuracy=0.4837, over 5067.00 frames. ], tot_loss[loss=4.417, NarTop10Accuracy=0.4408, over 5814.17 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (0/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27314MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,052 INFO [trainer.py:765] (0/8) Epoch 3, batch 900, train_loss[loss=4.033, NarTop10Accuracy=0.5184, over 6273.00 frames. ], tot_loss[loss=4.39, NarTop10Accuracy=0.4463, over 5836.51 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,958 INFO [trainer.py:765] (0/8) Epoch 3, batch 1000, train_loss[loss=4.229, NarTop10Accuracy=0.4771, over 6246.00 frames. ], tot_loss[loss=4.373, NarTop10Accuracy=0.4494, over 5922.26 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 14:56:37,301 INFO [trainer.py:765] (0/8) Epoch 3, batch 1100, train_loss[loss=4.378, NarTop10Accuracy=0.4348, over 6687.00 frames. ], tot_loss[loss=4.344, NarTop10Accuracy=0.4548, over 5939.32 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,378 INFO [trainer.py:765] (0/8) Epoch 3, batch 1200, train_loss[loss=4.296, NarTop10Accuracy=0.4554, over 7257.00 frames. ], tot_loss[loss=4.325, NarTop10Accuracy=0.4585, over 5932.72 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,631 INFO [trainer.py:765] (0/8) Epoch 3, batch 1300, train_loss[loss=4.371, NarTop10Accuracy=0.4529, over 4245.00 frames. ], tot_loss[loss=4.304, NarTop10Accuracy=0.4628, over 5964.61 frames. ], batch size: 5, lr: 2.22e-02 +2024-08-06 14:58:22,900 INFO [trainer.py:765] (0/8) Epoch 3, batch 1400, train_loss[loss=4.044, NarTop10Accuracy=0.5134, over 6153.00 frames. ], tot_loss[loss=4.294, NarTop10Accuracy=0.4645, over 6009.45 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,856 INFO [trainer.py:765] (0/8) Epoch 3, batch 1500, train_loss[loss=4.359, NarTop10Accuracy=0.4528, over 6273.00 frames. ], tot_loss[loss=4.276, NarTop10Accuracy=0.4683, over 5963.27 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 14:59:18,715 INFO [trainer.py:765] (0/8) Epoch 3, batch 1600, train_loss[loss=3.953, NarTop10Accuracy=0.5381, over 7149.00 frames. ], tot_loss[loss=4.26, NarTop10Accuracy=0.4716, over 5956.55 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,952 INFO [trainer.py:765] (0/8) Epoch 3, batch 1700, train_loss[loss=4.181, NarTop10Accuracy=0.4818, over 6693.00 frames. ], tot_loss[loss=4.232, NarTop10Accuracy=0.4769, over 5934.67 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (0/8) Epoch 3, batch 1800, train_loss[loss=3.962, NarTop10Accuracy=0.5352, over 7137.00 frames. ], tot_loss[loss=4.213, NarTop10Accuracy=0.4808, over 5982.10 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,949 INFO [trainer.py:765] (0/8) Epoch 3, batch 1900, train_loss[loss=4.646, NarTop10Accuracy=0.3923, over 6207.00 frames. ], tot_loss[loss=4.199, NarTop10Accuracy=0.4836, over 6025.95 frames. ], batch size: 51, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (0/8) Epoch 3, batch 2000, train_loss[loss=4.487, NarTop10Accuracy=0.4217, over 6312.00 frames. ], tot_loss[loss=4.163, NarTop10Accuracy=0.4908, over 5977.51 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,899 INFO [trainer.py:765] (0/8) Epoch 3, batch 2100, train_loss[loss=3.958, NarTop10Accuracy=0.5327, over 3945.00 frames. ], tot_loss[loss=4.14, NarTop10Accuracy=0.4955, over 5948.52 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (0/8) Epoch 3, batch 2200, train_loss[loss=3.954, NarTop10Accuracy=0.5321, over 7194.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.5012, over 6007.35 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (0/8) Epoch 3, batch 2300, train_loss[loss=4.411, NarTop10Accuracy=0.441, over 5682.00 frames. ], tot_loss[loss=4.127, NarTop10Accuracy=0.4986, over 6011.85 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,663 INFO [trainer.py:765] (0/8) Epoch 3, batch 2400, train_loss[loss=4.354, NarTop10Accuracy=0.4518, over 5019.00 frames. ], tot_loss[loss=4.102, NarTop10Accuracy=0.5037, over 5774.34 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,235 INFO [trainer.py:765] (0/8) Epoch 3, batch 2500, train_loss[loss=3.895, NarTop10Accuracy=0.5494, over 5250.00 frames. ], tot_loss[loss=4.048, NarTop10Accuracy=0.5146, over 5475.66 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,391 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:03:28,393 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-3.pt +2024-08-06 15:04:28,130 INFO [trainer.py:765] (0/8) Epoch 4, batch 100, train_loss[loss=3.959, NarTop10Accuracy=0.5323, over 7410.00 frames. ], tot_loss[loss=4.031, NarTop10Accuracy=0.5179, over 2368.88 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,842 INFO [trainer.py:765] (0/8) Epoch 4, batch 200, train_loss[loss=3.704, NarTop10Accuracy=0.5801, over 6750.00 frames. ], tot_loss[loss=4.01, NarTop10Accuracy=0.5229, over 3861.12 frames. 
], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,509 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27314MB +2024-08-06 15:05:36,238 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,889 INFO [trainer.py:765] (0/8) Epoch 4, batch 300, train_loss[loss=3.767, NarTop10Accuracy=0.5713, over 7185.00 frames. ], tot_loss[loss=3.998, NarTop10Accuracy=0.5254, over 4661.91 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,124 INFO [trainer.py:765] (0/8) Epoch 4, batch 400, train_loss[loss=3.578, NarTop10Accuracy=0.6158, over 5229.00 frames. ], tot_loss[loss=4.008, NarTop10Accuracy=0.5231, over 5111.58 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (0/8) Epoch 4, batch 500, train_loss[loss=4.22, NarTop10Accuracy=0.4842, over 6174.00 frames. ], tot_loss[loss=3.989, NarTop10Accuracy=0.5267, over 5380.80 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,817 INFO [trainer.py:765] (0/8) Epoch 4, batch 600, train_loss[loss=3.705, NarTop10Accuracy=0.588, over 5838.00 frames. ], tot_loss[loss=3.981, NarTop10Accuracy=0.528, over 5658.01 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (0/8) Epoch 4, batch 700, train_loss[loss=4.371, NarTop10Accuracy=0.4459, over 4305.00 frames. ], tot_loss[loss=3.976, NarTop10Accuracy=0.5293, over 5741.18 frames. ], batch size: 5, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (0/8) Epoch 4, batch 800, train_loss[loss=3.669, NarTop10Accuracy=0.5992, over 5055.00 frames. ], tot_loss[loss=3.962, NarTop10Accuracy=0.5322, over 5790.70 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,690 INFO [trainer.py:765] (0/8) Epoch 4, batch 900, train_loss[loss=3.559, NarTop10Accuracy=0.611, over 6246.00 frames. ], tot_loss[loss=3.925, NarTop10Accuracy=0.5396, over 5798.61 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,076 INFO [trainer.py:765] (0/8) Epoch 4, batch 1000, train_loss[loss=3.57, NarTop10Accuracy=0.6103, over 6225.00 frames. ], tot_loss[loss=3.911, NarTop10Accuracy=0.542, over 5902.03 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,140 INFO [trainer.py:765] (0/8) Epoch 4, batch 1100, train_loss[loss=3.842, NarTop10Accuracy=0.5547, over 6840.00 frames. ], tot_loss[loss=3.902, NarTop10Accuracy=0.5441, over 5938.36 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (0/8) Epoch 4, batch 1200, train_loss[loss=4.37, NarTop10Accuracy=0.4399, over 7392.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.545, over 5943.45 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (0/8) Epoch 4, batch 1300, train_loss[loss=3.712, NarTop10Accuracy=0.5762, over 5076.00 frames. ], tot_loss[loss=3.859, NarTop10Accuracy=0.5525, over 6002.17 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (0/8) Epoch 4, batch 1400, train_loss[loss=3.787, NarTop10Accuracy=0.5743, over 6069.00 frames. ], tot_loss[loss=3.859, NarTop10Accuracy=0.5528, over 6028.65 frames. 
], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (0/8) Epoch 4, batch 1500, train_loss[loss=3.836, NarTop10Accuracy=0.5589, over 6168.00 frames. ], tot_loss[loss=3.862, NarTop10Accuracy=0.5522, over 5971.10 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (0/8) Epoch 4, batch 1600, train_loss[loss=3.801, NarTop10Accuracy=0.5672, over 6996.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5534, over 5936.92 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,133 INFO [trainer.py:765] (0/8) Epoch 4, batch 1700, train_loss[loss=3.699, NarTop10Accuracy=0.583, over 6189.00 frames. ], tot_loss[loss=3.826, NarTop10Accuracy=0.5597, over 5930.29 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (0/8) Epoch 4, batch 1800, train_loss[loss=3.77, NarTop10Accuracy=0.5676, over 7167.00 frames. ], tot_loss[loss=3.824, NarTop10Accuracy=0.56, over 5975.82 frames. ], batch size: 23, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (0/8) Epoch 4, batch 1900, train_loss[loss=3.773, NarTop10Accuracy=0.573, over 6150.00 frames. ], tot_loss[loss=3.848, NarTop10Accuracy=0.5557, over 6012.83 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 15:14:46,672 INFO [trainer.py:765] (0/8) Epoch 4, batch 2000, train_loss[loss=3.771, NarTop10Accuracy=0.582, over 6834.00 frames. ], tot_loss[loss=3.828, NarTop10Accuracy=0.5596, over 6004.56 frames. ], batch size: 53, lr: 1.81e-02 +2024-08-06 15:15:11,859 INFO [trainer.py:765] (0/8) Epoch 4, batch 2100, train_loss[loss=3.572, NarTop10Accuracy=0.603, over 4863.00 frames. ], tot_loss[loss=3.81, NarTop10Accuracy=0.5633, over 5990.80 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (0/8) Epoch 4, batch 2200, train_loss[loss=3.668, NarTop10Accuracy=0.597, over 7176.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5641, over 6034.65 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,090 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:16:03,242 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. +2024-08-06 15:16:03,243 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27314MB +2024-08-06 15:16:03,741 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (0/8) Epoch 4, batch 2300, train_loss[loss=3.624, NarTop10Accuracy=0.6049, over 5622.00 frames. ], tot_loss[loss=3.806, NarTop10Accuracy=0.5637, over 6042.25 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (0/8) Epoch 4, batch 2400, train_loss[loss=3.616, NarTop10Accuracy=0.6004, over 5166.00 frames. ], tot_loss[loss=3.772, NarTop10Accuracy=0.5705, over 5778.66 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,535 INFO [trainer.py:765] (0/8) Epoch 4, batch 2500, train_loss[loss=3.387, NarTop10Accuracy=0.6682, over 5262.00 frames. ], tot_loss[loss=3.762, NarTop10Accuracy=0.5725, over 5475.32 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,131 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:17:18,134 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-4.pt +2024-08-06 15:18:24,101 INFO [trainer.py:765] (0/8) Epoch 5, batch 100, train_loss[loss=3.599, NarTop10Accuracy=0.6115, over 7323.00 frames. 
], tot_loss[loss=3.78, NarTop10Accuracy=0.5691, over 2362.71 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,676 INFO [trainer.py:765] (0/8) Epoch 5, batch 200, train_loss[loss=4.161, NarTop10Accuracy=0.4899, over 6672.00 frames. ], tot_loss[loss=3.759, NarTop10Accuracy=0.5735, over 3852.17 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,888 INFO [trainer.py:765] (0/8) Epoch 5, batch 300, train_loss[loss=3.961, NarTop10Accuracy=0.5252, over 7227.00 frames. ], tot_loss[loss=3.732, NarTop10Accuracy=0.5786, over 4659.95 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (0/8) Epoch 5, batch 400, train_loss[loss=3.658, NarTop10Accuracy=0.5945, over 5106.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5811, over 5116.71 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (0/8) Epoch 5, batch 500, train_loss[loss=3.926, NarTop10Accuracy=0.5349, over 6000.00 frames. ], tot_loss[loss=3.738, NarTop10Accuracy=0.5774, over 5391.54 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,711 INFO [trainer.py:765] (0/8) Epoch 5, batch 600, train_loss[loss=4.088, NarTop10Accuracy=0.5148, over 5781.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5809, over 5647.77 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (0/8) Epoch 5, batch 700, train_loss[loss=3.485, NarTop10Accuracy=0.6278, over 4236.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5813, over 5728.32 frames. ], batch size: 5, lr: 1.62e-02 +2024-08-06 15:22:24,499 INFO [trainer.py:765] (0/8) Epoch 5, batch 800, train_loss[loss=3.909, NarTop10Accuracy=0.5329, over 5073.00 frames. ], tot_loss[loss=3.71, NarTop10Accuracy=0.583, over 5793.58 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (0/8) Epoch 5, batch 900, train_loss[loss=3.67, NarTop10Accuracy=0.5935, over 6273.00 frames. ], tot_loss[loss=3.698, NarTop10Accuracy=0.5849, over 5795.50 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (0/8) Epoch 5, batch 1000, train_loss[loss=3.487, NarTop10Accuracy=0.6365, over 6570.00 frames. ], tot_loss[loss=3.688, NarTop10Accuracy=0.5874, over 5896.10 frames. ], batch size: 14, lr: 1.60e-02 +2024-08-06 15:24:09,571 INFO [trainer.py:765] (0/8) Epoch 5, batch 1100, train_loss[loss=3.508, NarTop10Accuracy=0.6337, over 6819.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5894, over 5921.13 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,529 INFO [trainer.py:765] (0/8) Epoch 5, batch 1200, train_loss[loss=3.53, NarTop10Accuracy=0.6194, over 7293.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5894, over 5900.74 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,380 INFO [trainer.py:765] (0/8) Epoch 5, batch 1300, train_loss[loss=3.732, NarTop10Accuracy=0.5818, over 4248.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5916, over 5978.22 frames. ], batch size: 5, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (0/8) Epoch 5, batch 1400, train_loss[loss=3.866, NarTop10Accuracy=0.5558, over 6120.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5912, over 6034.20 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (0/8) Epoch 5, batch 1500, train_loss[loss=3.609, NarTop10Accuracy=0.612, over 6519.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5921, over 5971.11 frames. 
], batch size: 51, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (0/8) Epoch 5, batch 1600, train_loss[loss=3.463, NarTop10Accuracy=0.6322, over 7092.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5902, over 5953.33 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,604 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (0/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27314MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (0/8) Epoch 5, batch 1700, train_loss[loss=3.708, NarTop10Accuracy=0.5883, over 6198.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5909, over 5927.21 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 15:27:55,653 INFO [trainer.py:765] (0/8) Epoch 5, batch 1800, train_loss[loss=3.878, NarTop10Accuracy=0.5434, over 7143.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.592, over 5997.54 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (0/8) Epoch 5, batch 1900, train_loss[loss=3.628, NarTop10Accuracy=0.602, over 6159.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5915, over 6029.41 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:28:47,894 INFO [trainer.py:765] (0/8) Epoch 5, batch 2000, train_loss[loss=3.735, NarTop10Accuracy=0.5803, over 6147.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5906, over 6005.61 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (0/8) Epoch 5, batch 2100, train_loss[loss=3.37, NarTop10Accuracy=0.6461, over 3960.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5881, over 5986.90 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (0/8) Epoch 5, batch 2200, train_loss[loss=4.029, NarTop10Accuracy=0.5076, over 7449.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5918, over 6022.85 frames. ], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (0/8) Epoch 5, batch 2300, train_loss[loss=3.39, NarTop10Accuracy=0.6471, over 5805.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5903, over 6037.63 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (0/8) Epoch 5, batch 2400, train_loss[loss=3.389, NarTop10Accuracy=0.6427, over 4998.00 frames. ], tot_loss[loss=3.645, NarTop10Accuracy=0.5954, over 5796.16 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (0/8) Epoch 5, batch 2500, train_loss[loss=3.313, NarTop10Accuracy=0.6675, over 5151.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.6026, over 5476.85 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,425 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:31:12,429 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-5.pt +2024-08-06 15:32:14,415 INFO [trainer.py:765] (0/8) Epoch 6, batch 100, train_loss[loss=3.445, NarTop10Accuracy=0.6468, over 7026.00 frames. ], tot_loss[loss=3.635, NarTop10Accuracy=0.5987, over 2359.54 frames. ], batch size: 31, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (0/8) Epoch 6, batch 200, train_loss[loss=3.978, NarTop10Accuracy=0.5324, over 6699.00 frames. 
], tot_loss[loss=3.612, NarTop10Accuracy=0.6033, over 3855.40 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,242 INFO [trainer.py:765] (0/8) Epoch 6, batch 300, train_loss[loss=3.462, NarTop10Accuracy=0.636, over 7227.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.6053, over 4676.93 frames. ], batch size: 22, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (0/8) Epoch 6, batch 400, train_loss[loss=3.425, NarTop10Accuracy=0.6412, over 5157.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.6072, over 5124.41 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (0/8) Epoch 6, batch 500, train_loss[loss=3.283, NarTop10Accuracy=0.6653, over 6084.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6105, over 5395.95 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (0/8) Epoch 6, batch 600, train_loss[loss=3.319, NarTop10Accuracy=0.6589, over 5730.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6094, over 5676.42 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,734 INFO [trainer.py:765] (0/8) Epoch 6, batch 700, train_loss[loss=3.368, NarTop10Accuracy=0.6528, over 5175.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6083, over 5747.23 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (0/8) Epoch 6, batch 800, train_loss[loss=3.853, NarTop10Accuracy=0.5519, over 4419.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.6062, over 5801.03 frames. ], batch size: 5, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (0/8) Epoch 6, batch 900, train_loss[loss=3.935, NarTop10Accuracy=0.5378, over 6195.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6093, over 5808.51 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (0/8) Epoch 6, batch 1000, train_loss[loss=3.544, NarTop10Accuracy=0.6245, over 6663.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.6058, over 5893.57 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (0/8) Epoch 6, batch 1100, train_loss[loss=3.513, NarTop10Accuracy=0.6372, over 6816.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.6063, over 5931.53 frames. ], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,828 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (0/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 27314MB +2024-08-06 15:38:04,966 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (0/8) Epoch 6, batch 1200, train_loss[loss=3.433, NarTop10Accuracy=0.6435, over 7284.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6087, over 5911.47 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,243 INFO [trainer.py:765] (0/8) Epoch 6, batch 1300, train_loss[loss=3.347, NarTop10Accuracy=0.6542, over 5049.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6101, over 5985.53 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 15:39:44,070 INFO [trainer.py:765] (0/8) Epoch 6, batch 1400, train_loss[loss=3.413, NarTop10Accuracy=0.6514, over 6036.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6109, over 6020.78 frames. 
], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (0/8) Epoch 6, batch 1500, train_loss[loss=3.959, NarTop10Accuracy=0.5284, over 6717.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6118, over 5951.86 frames. ], batch size: 52, lr: 1.36e-02 +2024-08-06 15:40:43,106 INFO [trainer.py:765] (0/8) Epoch 6, batch 1600, train_loss[loss=3.439, NarTop10Accuracy=0.6464, over 7218.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6115, over 5930.18 frames. ], batch size: 23, lr: 1.35e-02 +2024-08-06 15:41:09,789 INFO [trainer.py:765] (0/8) Epoch 6, batch 1700, train_loss[loss=3.48, NarTop10Accuracy=0.6384, over 6372.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6137, over 5915.95 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (0/8) Epoch 6, batch 1800, train_loss[loss=3.425, NarTop10Accuracy=0.6426, over 7140.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6119, over 5985.41 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (0/8) Epoch 6, batch 1900, train_loss[loss=3.792, NarTop10Accuracy=0.5725, over 5724.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6086, over 6025.36 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (0/8) Epoch 6, batch 2000, train_loss[loss=3.491, NarTop10Accuracy=0.6278, over 6216.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6098, over 5984.80 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,669 INFO [trainer.py:765] (0/8) Epoch 6, batch 2100, train_loss[loss=3.37, NarTop10Accuracy=0.6529, over 3882.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6129, over 5964.57 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (0/8) Epoch 6, batch 2200, train_loss[loss=3.825, NarTop10Accuracy=0.5623, over 7215.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6114, over 6018.32 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,105 INFO [trainer.py:765] (0/8) Epoch 6, batch 2300, train_loss[loss=3.368, NarTop10Accuracy=0.6542, over 5808.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6107, over 6028.53 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (0/8) Epoch 6, batch 2400, train_loss[loss=3.253, NarTop10Accuracy=0.6769, over 5124.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6157, over 5771.82 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (0/8) Epoch 6, batch 2500, train_loss[loss=3.543, NarTop10Accuracy=0.6223, over 5070.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6191, over 5490.16 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,940 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:44:51,944 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-6.pt +2024-08-06 15:45:58,043 INFO [trainer.py:765] (0/8) Epoch 7, batch 100, train_loss[loss=3.413, NarTop10Accuracy=0.641, over 7431.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6181, over 2371.15 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,615 INFO [trainer.py:765] (0/8) Epoch 7, batch 200, train_loss[loss=3.476, NarTop10Accuracy=0.639, over 6834.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6191, over 3873.64 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,247 INFO [trainer.py:765] (0/8) Epoch 7, batch 300, train_loss[loss=3.754, NarTop10Accuracy=0.5664, over 7251.00 frames. 
], tot_loss[loss=3.543, NarTop10Accuracy=0.6165, over 4672.98 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,496 INFO [trainer.py:765] (0/8) Epoch 7, batch 400, train_loss[loss=3.298, NarTop10Accuracy=0.6589, over 5085.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6193, over 5111.55 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,731 INFO [trainer.py:765] (0/8) Epoch 7, batch 500, train_loss[loss=3.562, NarTop10Accuracy=0.6082, over 6084.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6206, over 5392.41 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,370 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,721 INFO [trainer.py:765] (0/8) Epoch 7, batch 600, train_loss[loss=3.153, NarTop10Accuracy=0.6996, over 5727.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6207, over 5661.55 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,913 INFO [trainer.py:765] (0/8) Epoch 7, batch 700, train_loss[loss=3.569, NarTop10Accuracy=0.594, over 5202.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6223, over 5735.66 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,382 INFO [trainer.py:765] (0/8) Epoch 7, batch 800, train_loss[loss=3.363, NarTop10Accuracy=0.6648, over 5118.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6256, over 5800.32 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,550 INFO [trainer.py:765] (0/8) Epoch 7, batch 900, train_loss[loss=3.277, NarTop10Accuracy=0.6702, over 6195.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6265, over 5822.83 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,156 INFO [trainer.py:765] (0/8) Epoch 7, batch 1000, train_loss[loss=3.263, NarTop10Accuracy=0.6716, over 6372.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6275, over 5912.82 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 15:51:51,760 INFO [trainer.py:765] (0/8) Epoch 7, batch 1100, train_loss[loss=3.373, NarTop10Accuracy=0.6528, over 6822.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.627, over 5933.00 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,700 INFO [trainer.py:765] (0/8) Epoch 7, batch 1200, train_loss[loss=3.368, NarTop10Accuracy=0.6553, over 7146.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6282, over 5921.73 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,008 INFO [trainer.py:765] (0/8) Epoch 7, batch 1300, train_loss[loss=3.264, NarTop10Accuracy=0.6692, over 4341.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6275, over 5977.49 frames. ], batch size: 5, lr: 1.19e-02 +2024-08-06 15:53:33,843 INFO [trainer.py:765] (0/8) Epoch 7, batch 1400, train_loss[loss=3.409, NarTop10Accuracy=0.6467, over 6012.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6261, over 6021.77 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (0/8) Epoch 7, batch 1500, train_loss[loss=3.784, NarTop10Accuracy=0.569, over 5850.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6302, over 5942.10 frames. 
], batch size: 50, lr: 1.19e-02 +2024-08-06 15:54:32,386 INFO [trainer.py:765] (0/8) Epoch 7, batch 1600, train_loss[loss=3.642, NarTop10Accuracy=0.5948, over 7005.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6279, over 5930.30 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,056 INFO [trainer.py:765] (0/8) Epoch 7, batch 1700, train_loss[loss=3.617, NarTop10Accuracy=0.5998, over 6645.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6251, over 5939.91 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 15:55:25,513 INFO [trainer.py:765] (0/8) Epoch 7, batch 1800, train_loss[loss=3.732, NarTop10Accuracy=0.5672, over 7101.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6275, over 5988.30 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,083 INFO [trainer.py:765] (0/8) Epoch 7, batch 1900, train_loss[loss=3.473, NarTop10Accuracy=0.6351, over 6111.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6236, over 6026.83 frames. ], batch size: 50, lr: 1.18e-02 +2024-08-06 15:56:17,592 INFO [trainer.py:765] (0/8) Epoch 7, batch 2000, train_loss[loss=3.707, NarTop10Accuracy=0.5843, over 6405.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6242, over 6001.32 frames. ], batch size: 51, lr: 1.17e-02 +2024-08-06 15:56:42,857 INFO [trainer.py:765] (0/8) Epoch 7, batch 2100, train_loss[loss=3.425, NarTop10Accuracy=0.6054, over 4068.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6281, over 5966.88 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,080 INFO [trainer.py:765] (0/8) Epoch 7, batch 2200, train_loss[loss=3.504, NarTop10Accuracy=0.6331, over 7044.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6244, over 6015.87 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,179 INFO [trainer.py:765] (0/8) Epoch 7, batch 2300, train_loss[loss=3.3, NarTop10Accuracy=0.6648, over 5808.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.624, over 6029.06 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,620 INFO [trainer.py:765] (0/8) Epoch 7, batch 2400, train_loss[loss=3.372, NarTop10Accuracy=0.6556, over 5229.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6266, over 5775.86 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,089 INFO [trainer.py:765] (0/8) Epoch 7, batch 2500, train_loss[loss=3.687, NarTop10Accuracy=0.5931, over 5097.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6317, over 5461.86 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,566 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 15:58:40,220 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:49,186 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:58:49,190 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-7.pt +2024-08-06 15:59:52,877 INFO [trainer.py:765] (0/8) Epoch 8, batch 100, train_loss[loss=3.513, NarTop10Accuracy=0.6199, over 7377.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6352, over 2361.58 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (0/8) Epoch 8, batch 200, train_loss[loss=3.34, NarTop10Accuracy=0.6549, over 6918.00 frames. 
], tot_loss[loss=3.476, NarTop10Accuracy=0.6304, over 3845.46 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,563 INFO [trainer.py:765] (0/8) Epoch 8, batch 300, train_loss[loss=3.351, NarTop10Accuracy=0.6574, over 7149.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.632, over 4653.78 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,760 INFO [trainer.py:765] (0/8) Epoch 8, batch 400, train_loss[loss=3.701, NarTop10Accuracy=0.5839, over 5175.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6315, over 5101.42 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,066 INFO [trainer.py:765] (0/8) Epoch 8, batch 500, train_loss[loss=3.821, NarTop10Accuracy=0.5507, over 6174.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6338, over 5379.20 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (0/8) Epoch 8, batch 600, train_loss[loss=3.156, NarTop10Accuracy=0.7004, over 5844.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6302, over 5642.59 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,500 INFO [trainer.py:765] (0/8) Epoch 8, batch 700, train_loss[loss=3.648, NarTop10Accuracy=0.5958, over 5007.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6291, over 5714.94 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (0/8) Epoch 8, batch 800, train_loss[loss=3.511, NarTop10Accuracy=0.6198, over 5040.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6308, over 5763.16 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:04:27,588 INFO [trainer.py:765] (0/8) Epoch 8, batch 900, train_loss[loss=3.278, NarTop10Accuracy=0.665, over 6663.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.635, over 5778.07 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (0/8) Epoch 8, batch 1000, train_loss[loss=3.609, NarTop10Accuracy=0.5924, over 6159.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6367, over 5881.94 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:05:37,294 INFO [trainer.py:765] (0/8) Epoch 8, batch 1100, train_loss[loss=3.623, NarTop10Accuracy=0.591, over 7125.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.637, over 5905.93 frames. ], batch size: 18, lr: 1.06e-02 +2024-08-06 16:06:15,859 INFO [trainer.py:765] (0/8) Epoch 8, batch 1200, train_loss[loss=3.447, NarTop10Accuracy=0.643, over 7335.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6341, over 5915.91 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (0/8) Epoch 8, batch 1300, train_loss[loss=3.228, NarTop10Accuracy=0.6784, over 5172.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6372, over 5990.31 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 16:07:24,236 INFO [trainer.py:765] (0/8) Epoch 8, batch 1400, train_loss[loss=3.525, NarTop10Accuracy=0.6169, over 5997.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6362, over 6004.63 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,169 INFO [trainer.py:765] (0/8) Epoch 8, batch 1500, train_loss[loss=3.456, NarTop10Accuracy=0.6435, over 6444.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6397, over 5934.45 frames. ], batch size: 53, lr: 1.05e-02 +2024-08-06 16:08:19,949 INFO [trainer.py:765] (0/8) Epoch 8, batch 1600, train_loss[loss=3.239, NarTop10Accuracy=0.6812, over 7329.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6401, over 5924.60 frames. 
], batch size: 23, lr: 1.05e-02 +2024-08-06 16:08:46,618 INFO [trainer.py:765] (0/8) Epoch 8, batch 1700, train_loss[loss=3.416, NarTop10Accuracy=0.6469, over 6309.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6404, over 5913.43 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 16:09:13,106 INFO [trainer.py:765] (0/8) Epoch 8, batch 1800, train_loss[loss=3.352, NarTop10Accuracy=0.6577, over 7119.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6411, over 5984.33 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,636 INFO [trainer.py:765] (0/8) Epoch 8, batch 1900, train_loss[loss=3.709, NarTop10Accuracy=0.5917, over 6039.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6431, over 6018.01 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:09:56,940 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (0/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 16:10:05,470 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,204 INFO [trainer.py:765] (0/8) Epoch 8, batch 2000, train_loss[loss=4.042, NarTop10Accuracy=0.5142, over 6165.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.641, over 5991.40 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,515 INFO [trainer.py:765] (0/8) Epoch 8, batch 2100, train_loss[loss=3.329, NarTop10Accuracy=0.6686, over 4071.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.642, over 5961.37 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,747 INFO [trainer.py:765] (0/8) Epoch 8, batch 2200, train_loss[loss=3.54, NarTop10Accuracy=0.6168, over 7482.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6413, over 6012.01 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,905 INFO [trainer.py:765] (0/8) Epoch 8, batch 2300, train_loss[loss=3.738, NarTop10Accuracy=0.5715, over 5664.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6372, over 6024.93 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,093 INFO [trainer.py:765] (0/8) Epoch 8, batch 2400, train_loss[loss=3.36, NarTop10Accuracy=0.6562, over 5196.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6411, over 5789.77 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,446 INFO [trainer.py:765] (0/8) Epoch 8, batch 2500, train_loss[loss=3.414, NarTop10Accuracy=0.6395, over 5142.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6435, over 5462.32 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,907 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 16:12:36,910 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-8.pt +2024-08-06 16:13:37,515 INFO [trainer.py:765] (0/8) Epoch 9, batch 100, train_loss[loss=3.186, NarTop10Accuracy=0.6898, over 7053.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6546, over 2363.40 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,441 INFO [trainer.py:765] (0/8) Epoch 9, batch 200, train_loss[loss=3.613, NarTop10Accuracy=0.5957, over 6750.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6536, over 3850.63 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,509 INFO [trainer.py:765] (0/8) Epoch 9, batch 300, train_loss[loss=3.297, NarTop10Accuracy=0.6602, over 6960.00 frames. 
], tot_loss[loss=3.372, NarTop10Accuracy=0.652, over 4650.87 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,915 INFO [trainer.py:765] (0/8) Epoch 9, batch 400, train_loss[loss=2.992, NarTop10Accuracy=0.7159, over 5001.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6555, over 5114.48 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,337 INFO [trainer.py:765] (0/8) Epoch 9, batch 500, train_loss[loss=3.289, NarTop10Accuracy=0.6662, over 6246.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6565, over 5384.78 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,973 INFO [trainer.py:765] (0/8) Epoch 9, batch 600, train_loss[loss=3.544, NarTop10Accuracy=0.6154, over 5769.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6597, over 5647.44 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,146 INFO [trainer.py:765] (0/8) Epoch 9, batch 700, train_loss[loss=3.122, NarTop10Accuracy=0.6896, over 5103.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6557, over 5708.11 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,053 INFO [trainer.py:765] (0/8) Epoch 9, batch 800, train_loss[loss=3.171, NarTop10Accuracy=0.6844, over 4950.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6493, over 5787.75 frames. ], batch size: 6, lr: 9.57e-03 +2024-08-06 16:18:07,816 INFO [trainer.py:765] (0/8) Epoch 9, batch 900, train_loss[loss=3.301, NarTop10Accuracy=0.6699, over 6228.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6495, over 5814.37 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,345 INFO [trainer.py:765] (0/8) Epoch 9, batch 1000, train_loss[loss=3.087, NarTop10Accuracy=0.7181, over 6645.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6466, over 5916.90 frames. ], batch size: 14, lr: 9.53e-03 +2024-08-06 16:19:15,383 INFO [trainer.py:765] (0/8) Epoch 9, batch 1100, train_loss[loss=3.438, NarTop10Accuracy=0.6364, over 6729.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6455, over 5965.39 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,878 INFO [trainer.py:765] (0/8) Epoch 9, batch 1200, train_loss[loss=3.835, NarTop10Accuracy=0.5599, over 7203.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6435, over 5952.35 frames. ], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,907 INFO [trainer.py:765] (0/8) Epoch 9, batch 1300, train_loss[loss=3.219, NarTop10Accuracy=0.685, over 4956.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6432, over 6013.00 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,581 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (0/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 16:21:05,036 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,692 INFO [trainer.py:765] (0/8) Epoch 9, batch 1400, train_loss[loss=3.524, NarTop10Accuracy=0.6184, over 6204.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.643, over 6024.80 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,896 INFO [trainer.py:765] (0/8) Epoch 9, batch 1500, train_loss[loss=3.36, NarTop10Accuracy=0.6545, over 5874.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6482, over 5962.18 frames. 
], batch size: 50, lr: 9.42e-03 +2024-08-06 16:22:06,721 INFO [trainer.py:765] (0/8) Epoch 9, batch 1600, train_loss[loss=3.352, NarTop10Accuracy=0.6546, over 7119.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6505, over 5946.11 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (0/8) Epoch 9, batch 1700, train_loss[loss=3.434, NarTop10Accuracy=0.6378, over 6819.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6475, over 5926.66 frames. ], batch size: 14, lr: 9.38e-03 +2024-08-06 16:23:00,064 INFO [trainer.py:765] (0/8) Epoch 9, batch 1800, train_loss[loss=3.115, NarTop10Accuracy=0.6969, over 7035.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6496, over 5986.04 frames. ], batch size: 22, lr: 9.36e-03 +2024-08-06 16:23:26,783 INFO [trainer.py:765] (0/8) Epoch 9, batch 1900, train_loss[loss=3.387, NarTop10Accuracy=0.6559, over 6510.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6493, over 6033.34 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,486 INFO [trainer.py:765] (0/8) Epoch 9, batch 2000, train_loss[loss=3.921, NarTop10Accuracy=0.5345, over 6045.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6486, over 5998.57 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,963 INFO [trainer.py:765] (0/8) Epoch 9, batch 2100, train_loss[loss=3.191, NarTop10Accuracy=0.7008, over 4929.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6489, over 5976.14 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,422 INFO [trainer.py:765] (0/8) Epoch 9, batch 2200, train_loss[loss=3.671, NarTop10Accuracy=0.5969, over 7083.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6484, over 6005.88 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,721 INFO [trainer.py:765] (0/8) Epoch 9, batch 2300, train_loss[loss=3.428, NarTop10Accuracy=0.6407, over 5724.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6452, over 6014.34 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,164 INFO [trainer.py:765] (0/8) Epoch 9, batch 2400, train_loss[loss=3.146, NarTop10Accuracy=0.7004, over 5085.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6468, over 5766.76 frames. ], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,769 INFO [trainer.py:765] (0/8) Epoch 9, batch 2500, train_loss[loss=3.107, NarTop10Accuracy=0.6997, over 5046.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6523, over 5464.86 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,445 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 16:26:16,447 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-9.pt +2024-08-06 16:27:19,584 INFO [trainer.py:765] (0/8) Epoch 10, batch 100, train_loss[loss=3.221, NarTop10Accuracy=0.6773, over 7293.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6512, over 2363.26 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,628 INFO [trainer.py:765] (0/8) Epoch 10, batch 200, train_loss[loss=3.053, NarTop10Accuracy=0.7157, over 6903.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6546, over 3859.16 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (0/8) Epoch 10, batch 300, train_loss[loss=3.104, NarTop10Accuracy=0.7048, over 6993.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6543, over 4659.82 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,200 INFO [trainer.py:765] (0/8) Epoch 10, batch 400, train_loss[loss=3.181, NarTop10Accuracy=0.6848, over 5217.00 frames. 
], tot_loss[loss=3.343, NarTop10Accuracy=0.6571, over 5091.27 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,218 INFO [trainer.py:765] (0/8) Epoch 10, batch 500, train_loss[loss=3.105, NarTop10Accuracy=0.7084, over 6024.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6577, over 5366.72 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (0/8) Epoch 10, batch 600, train_loss[loss=3.286, NarTop10Accuracy=0.665, over 5859.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6575, over 5648.06 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,265 INFO [trainer.py:765] (0/8) Epoch 10, batch 700, train_loss[loss=3.361, NarTop10Accuracy=0.6508, over 4338.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6568, over 5703.47 frames. ], batch size: 5, lr: 8.65e-03 +2024-08-06 16:31:09,843 INFO [trainer.py:765] (0/8) Epoch 10, batch 800, train_loss[loss=3.43, NarTop10Accuracy=0.6314, over 5013.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6549, over 5771.98 frames. ], batch size: 6, lr: 8.64e-03 +2024-08-06 16:31:16,258 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (0/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 16:31:25,155 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (0/8) Epoch 10, batch 900, train_loss[loss=3.155, NarTop10Accuracy=0.6927, over 6762.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.66, over 5783.23 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (0/8) Epoch 10, batch 1000, train_loss[loss=2.988, NarTop10Accuracy=0.7299, over 6312.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6592, over 5890.91 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (0/8) Epoch 10, batch 1100, train_loss[loss=3.091, NarTop10Accuracy=0.7033, over 7053.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6563, over 5917.42 frames. ], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (0/8) Epoch 10, batch 1200, train_loss[loss=3.318, NarTop10Accuracy=0.6628, over 7338.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6572, over 5926.24 frames. ], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,170 INFO [trainer.py:765] (0/8) Epoch 10, batch 1300, train_loss[loss=3.183, NarTop10Accuracy=0.6867, over 5103.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6574, over 5993.91 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,201 INFO [trainer.py:765] (0/8) Epoch 10, batch 1400, train_loss[loss=3.445, NarTop10Accuracy=0.637, over 6027.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6524, over 6014.34 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (0/8) Epoch 10, batch 1500, train_loss[loss=3.524, NarTop10Accuracy=0.6145, over 6420.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.657, over 5958.20 frames. ], batch size: 52, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (0/8) Epoch 10, batch 1600, train_loss[loss=3.666, NarTop10Accuracy=0.5903, over 7020.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6598, over 5942.72 frames. 
], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (0/8) Epoch 10, batch 1700, train_loss[loss=3.42, NarTop10Accuracy=0.6432, over 6741.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6579, over 5910.28 frames. ], batch size: 14, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (0/8) Epoch 10, batch 1800, train_loss[loss=3.13, NarTop10Accuracy=0.7, over 6837.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6591, over 5979.15 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (0/8) Epoch 10, batch 1900, train_loss[loss=3.23, NarTop10Accuracy=0.6844, over 5991.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6607, over 6008.35 frames. ], batch size: 50, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (0/8) Epoch 10, batch 2000, train_loss[loss=3.242, NarTop10Accuracy=0.6785, over 5847.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6614, over 5986.47 frames. ], batch size: 51, lr: 8.44e-03 +2024-08-06 16:38:01,649 INFO [trainer.py:765] (0/8) Epoch 10, batch 2100, train_loss[loss=3.566, NarTop10Accuracy=0.6161, over 4899.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6588, over 5953.88 frames. ], batch size: 5, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (0/8) Epoch 10, batch 2200, train_loss[loss=3.707, NarTop10Accuracy=0.5842, over 7170.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6571, over 5995.01 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (0/8) Epoch 10, batch 2300, train_loss[loss=3.056, NarTop10Accuracy=0.7226, over 5511.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6558, over 6023.48 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,005 INFO [trainer.py:765] (0/8) Epoch 10, batch 2400, train_loss[loss=3.225, NarTop10Accuracy=0.6735, over 5259.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6617, over 5785.01 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (0/8) Epoch 10, batch 2500, train_loss[loss=3.552, NarTop10Accuracy=0.6136, over 5343.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6665, over 5470.92 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,398 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 16:40:00,401 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-10.pt +2024-08-06 16:41:06,235 INFO [trainer.py:765] (0/8) Epoch 11, batch 100, train_loss[loss=3.592, NarTop10Accuracy=0.6059, over 7398.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6552, over 2366.23 frames. ], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,022 INFO [trainer.py:765] (0/8) Epoch 11, batch 200, train_loss[loss=3.713, NarTop10Accuracy=0.588, over 6708.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.66, over 3869.78 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,191 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,975 INFO [trainer.py:765] (0/8) Epoch 11, batch 300, train_loss[loss=3.077, NarTop10Accuracy=0.7129, over 7062.00 frames. 
], tot_loss[loss=3.297, NarTop10Accuracy=0.6666, over 4665.36 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,154 INFO [trainer.py:765] (0/8) Epoch 11, batch 400, train_loss[loss=3.412, NarTop10Accuracy=0.6424, over 5142.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6675, over 5103.01 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,719 INFO [trainer.py:765] (0/8) Epoch 11, batch 500, train_loss[loss=3.137, NarTop10Accuracy=0.6985, over 6051.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6678, over 5375.64 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,242 INFO [trainer.py:765] (0/8) Epoch 11, batch 600, train_loss[loss=3.426, NarTop10Accuracy=0.6382, over 5781.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6671, over 5636.60 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (0/8) Epoch 11, batch 700, train_loss[loss=3.323, NarTop10Accuracy=0.6548, over 4233.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6674, over 5709.51 frames. ], batch size: 5, lr: 7.88e-03 +2024-08-06 16:45:10,468 INFO [trainer.py:765] (0/8) Epoch 11, batch 800, train_loss[loss=2.846, NarTop10Accuracy=0.7582, over 5133.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6646, over 5759.99 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,457 INFO [trainer.py:765] (0/8) Epoch 11, batch 900, train_loss[loss=3.623, NarTop10Accuracy=0.5958, over 6300.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6647, over 5797.17 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,312 INFO [trainer.py:765] (0/8) Epoch 11, batch 1000, train_loss[loss=3.297, NarTop10Accuracy=0.654, over 6336.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6655, over 5900.57 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 16:46:53,456 INFO [trainer.py:765] (0/8) Epoch 11, batch 1100, train_loss[loss=2.974, NarTop10Accuracy=0.7241, over 6807.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.667, over 5927.17 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (0/8) Epoch 11, batch 1200, train_loss[loss=3.401, NarTop10Accuracy=0.6514, over 7329.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6659, over 5929.21 frames. ], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,482 INFO [trainer.py:765] (0/8) Epoch 11, batch 1300, train_loss[loss=2.863, NarTop10Accuracy=0.7491, over 4989.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6638, over 5996.78 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (0/8) Epoch 11, batch 1400, train_loss[loss=3.548, NarTop10Accuracy=0.6162, over 6045.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6594, over 6020.48 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,345 INFO [trainer.py:765] (0/8) Epoch 11, batch 1500, train_loss[loss=3.266, NarTop10Accuracy=0.676, over 5991.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6586, over 5956.98 frames. ], batch size: 50, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (0/8) Epoch 11, batch 1600, train_loss[loss=3.256, NarTop10Accuracy=0.6747, over 7215.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6623, over 5921.96 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,792 INFO [trainer.py:765] (0/8) Epoch 11, batch 1700, train_loss[loss=3.309, NarTop10Accuracy=0.6605, over 6237.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6648, over 5924.89 frames. 
], batch size: 13, lr: 7.74e-03 +2024-08-06 16:50:30,353 INFO [trainer.py:765] (0/8) Epoch 11, batch 1800, train_loss[loss=3.462, NarTop10Accuracy=0.6467, over 7080.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6616, over 5992.79 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (0/8) Epoch 11, batch 1900, train_loss[loss=3.72, NarTop10Accuracy=0.5857, over 6552.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6614, over 6034.18 frames. ], batch size: 51, lr: 7.71e-03 +2024-08-06 16:51:22,405 INFO [trainer.py:765] (0/8) Epoch 11, batch 2000, train_loss[loss=3.864, NarTop10Accuracy=0.5522, over 6198.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6626, over 5998.34 frames. ], batch size: 50, lr: 7.70e-03 +2024-08-06 16:51:47,794 INFO [trainer.py:765] (0/8) Epoch 11, batch 2100, train_loss[loss=2.945, NarTop10Accuracy=0.7423, over 3966.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6638, over 5949.04 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (0/8) Epoch 11, batch 2200, train_loss[loss=3.305, NarTop10Accuracy=0.6592, over 7518.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6649, over 5990.45 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,899 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,080 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28825MB +2024-08-06 16:52:32,594 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (0/8) Epoch 11, batch 2300, train_loss[loss=3.246, NarTop10Accuracy=0.6813, over 5652.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6622, over 6000.54 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (0/8) Epoch 11, batch 2400, train_loss[loss=3.466, NarTop10Accuracy=0.6391, over 4950.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6646, over 5769.98 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,373 INFO [trainer.py:765] (0/8) Epoch 11, batch 2500, train_loss[loss=3.549, NarTop10Accuracy=0.6083, over 5103.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6659, over 5477.71 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,213 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 16:53:54,216 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-11.pt +2024-08-06 16:54:58,526 INFO [trainer.py:765] (0/8) Epoch 12, batch 100, train_loss[loss=3.69, NarTop10Accuracy=0.5761, over 7134.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6663, over 2352.14 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,432 INFO [trainer.py:765] (0/8) Epoch 12, batch 200, train_loss[loss=3.1, NarTop10Accuracy=0.7153, over 6738.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6737, over 3859.40 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,096 INFO [trainer.py:765] (0/8) Epoch 12, batch 300, train_loss[loss=2.905, NarTop10Accuracy=0.7485, over 7275.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6776, over 4660.40 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,426 INFO [trainer.py:765] (0/8) Epoch 12, batch 400, train_loss[loss=3.057, NarTop10Accuracy=0.7139, over 5763.00 frames. 
], tot_loss[loss=3.255, NarTop10Accuracy=0.675, over 5116.91 frames. ], batch size: 8, lr: 7.26e-03 +2024-08-06 16:57:10,503 INFO [trainer.py:765] (0/8) Epoch 12, batch 500, train_loss[loss=3.602, NarTop10Accuracy=0.5976, over 6147.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6715, over 5379.63 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,484 INFO [trainer.py:765] (0/8) Epoch 12, batch 600, train_loss[loss=2.987, NarTop10Accuracy=0.7316, over 5889.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6727, over 5641.25 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,005 INFO [trainer.py:765] (0/8) Epoch 12, batch 700, train_loss[loss=3.745, NarTop10Accuracy=0.5687, over 5064.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6689, over 5712.70 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 16:58:53,469 INFO [trainer.py:765] (0/8) Epoch 12, batch 800, train_loss[loss=3.22, NarTop10Accuracy=0.6718, over 5142.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6676, over 5783.57 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,206 INFO [trainer.py:765] (0/8) Epoch 12, batch 900, train_loss[loss=3.124, NarTop10Accuracy=0.7064, over 6657.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6718, over 5800.88 frames. ], batch size: 14, lr: 7.20e-03 +2024-08-06 17:00:01,574 INFO [trainer.py:765] (0/8) Epoch 12, batch 1000, train_loss[loss=2.989, NarTop10Accuracy=0.7335, over 6780.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6696, over 5888.50 frames. ], batch size: 14, lr: 7.19e-03 +2024-08-06 17:00:39,189 INFO [trainer.py:765] (0/8) Epoch 12, batch 1100, train_loss[loss=3.656, NarTop10Accuracy=0.5978, over 6717.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6654, over 5936.38 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,964 INFO [trainer.py:765] (0/8) Epoch 12, batch 1200, train_loss[loss=3.025, NarTop10Accuracy=0.7252, over 7227.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6726, over 5940.55 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,108 INFO [trainer.py:765] (0/8) Epoch 12, batch 1300, train_loss[loss=3.316, NarTop10Accuracy=0.6527, over 5007.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6694, over 6008.88 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,324 INFO [trainer.py:765] (0/8) Epoch 12, batch 1400, train_loss[loss=3.541, NarTop10Accuracy=0.6185, over 6045.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6679, over 6035.59 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (0/8) Epoch 12, batch 1500, train_loss[loss=3.344, NarTop10Accuracy=0.6541, over 6192.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6723, over 5972.44 frames. ], batch size: 52, lr: 7.13e-03 +2024-08-06 17:03:20,691 INFO [trainer.py:765] (0/8) Epoch 12, batch 1600, train_loss[loss=3.304, NarTop10Accuracy=0.6642, over 7119.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6698, over 5939.78 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,297 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:03:46,473 INFO [trainer.py:811] (0/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. 
+2024-08-06 17:03:46,474 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28949MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,603 INFO [trainer.py:765] (0/8) Epoch 12, batch 1700, train_loss[loss=3.363, NarTop10Accuracy=0.6606, over 6192.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6697, over 5917.76 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,121 INFO [trainer.py:765] (0/8) Epoch 12, batch 1800, train_loss[loss=3.509, NarTop10Accuracy=0.6235, over 7122.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6686, over 5974.16 frames. ], batch size: 22, lr: 7.10e-03 +2024-08-06 17:04:48,591 INFO [trainer.py:765] (0/8) Epoch 12, batch 1900, train_loss[loss=3.305, NarTop10Accuracy=0.6615, over 6597.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6698, over 6021.40 frames. ], batch size: 52, lr: 7.08e-03 +2024-08-06 17:05:14,198 INFO [trainer.py:765] (0/8) Epoch 12, batch 2000, train_loss[loss=3.493, NarTop10Accuracy=0.6261, over 5973.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6719, over 5997.92 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 17:05:39,468 INFO [trainer.py:765] (0/8) Epoch 12, batch 2100, train_loss[loss=3.326, NarTop10Accuracy=0.6645, over 4794.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6704, over 5978.98 frames. ], batch size: 5, lr: 7.06e-03 +2024-08-06 17:06:04,692 INFO [trainer.py:765] (0/8) Epoch 12, batch 2200, train_loss[loss=3.351, NarTop10Accuracy=0.6554, over 7260.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6686, over 5992.70 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,847 INFO [trainer.py:765] (0/8) Epoch 12, batch 2300, train_loss[loss=3.358, NarTop10Accuracy=0.6432, over 5727.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6684, over 6016.18 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,200 INFO [trainer.py:765] (0/8) Epoch 12, batch 2400, train_loss[loss=3.173, NarTop10Accuracy=0.7012, over 5196.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6714, over 5781.10 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,646 INFO [trainer.py:765] (0/8) Epoch 12, batch 2500, train_loss[loss=3.141, NarTop10Accuracy=0.706, over 5184.00 frames. ], tot_loss[loss=3.255, NarTop10Accuracy=0.6748, over 5487.45 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,728 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 17:07:37,732 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-12.pt +2024-08-06 17:08:40,079 INFO [trainer.py:765] (0/8) Epoch 13, batch 100, train_loss[loss=3.039, NarTop10Accuracy=0.732, over 7569.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6699, over 2358.13 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,120 INFO [trainer.py:765] (0/8) Epoch 13, batch 200, train_loss[loss=2.976, NarTop10Accuracy=0.7335, over 6813.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6688, over 3861.03 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,278 INFO [trainer.py:765] (0/8) Epoch 13, batch 300, train_loss[loss=3.642, NarTop10Accuracy=0.5985, over 7386.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6738, over 4668.77 frames. ], batch size: 23, lr: 6.71e-03 +2024-08-06 17:10:19,165 INFO [trainer.py:765] (0/8) Epoch 13, batch 400, train_loss[loss=2.929, NarTop10Accuracy=0.7463, over 5055.00 frames. 
], tot_loss[loss=3.242, NarTop10Accuracy=0.6777, over 5102.25 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,335 INFO [trainer.py:765] (0/8) Epoch 13, batch 500, train_loss[loss=3.14, NarTop10Accuracy=0.7012, over 6033.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6787, over 5379.48 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,246 INFO [trainer.py:765] (0/8) Epoch 13, batch 600, train_loss[loss=3.057, NarTop10Accuracy=0.7165, over 5820.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.679, over 5669.48 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,382 INFO [trainer.py:765] (0/8) Epoch 13, batch 700, train_loss[loss=3.083, NarTop10Accuracy=0.703, over 4242.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6781, over 5727.25 frames. ], batch size: 5, lr: 6.67e-03 +2024-08-06 17:12:33,442 INFO [trainer.py:765] (0/8) Epoch 13, batch 800, train_loss[loss=2.877, NarTop10Accuracy=0.7537, over 4353.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6772, over 5768.78 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 17:13:10,032 INFO [trainer.py:765] (0/8) Epoch 13, batch 900, train_loss[loss=3.237, NarTop10Accuracy=0.6854, over 6711.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6783, over 5800.80 frames. ], batch size: 14, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (0/8) Epoch 13, batch 1000, train_loss[loss=3.487, NarTop10Accuracy=0.6253, over 6738.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6792, over 5915.04 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,537 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (0/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28949MB +2024-08-06 17:14:24,471 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,698 INFO [trainer.py:765] (0/8) Epoch 13, batch 1100, train_loss[loss=3.511, NarTop10Accuracy=0.62, over 6777.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6776, over 5925.35 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,475 INFO [trainer.py:765] (0/8) Epoch 13, batch 1200, train_loss[loss=3.475, NarTop10Accuracy=0.6265, over 7149.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6752, over 5929.20 frames. ], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (0/8) Epoch 13, batch 1300, train_loss[loss=2.82, NarTop10Accuracy=0.7671, over 4281.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6741, over 5997.50 frames. ], batch size: 5, lr: 6.61e-03 +2024-08-06 17:16:11,783 INFO [trainer.py:765] (0/8) Epoch 13, batch 1400, train_loss[loss=2.965, NarTop10Accuracy=0.7273, over 6072.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.674, over 5998.95 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,788 INFO [trainer.py:765] (0/8) Epoch 13, batch 1500, train_loss[loss=3.556, NarTop10Accuracy=0.61, over 6171.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.6744, over 5962.24 frames. ], batch size: 52, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (0/8) Epoch 13, batch 1600, train_loss[loss=3.087, NarTop10Accuracy=0.714, over 6921.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6731, over 5946.39 frames. 
], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,261 INFO [trainer.py:765] (0/8) Epoch 13, batch 1700, train_loss[loss=3.339, NarTop10Accuracy=0.6568, over 6204.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6727, over 5939.43 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,762 INFO [trainer.py:765] (0/8) Epoch 13, batch 1800, train_loss[loss=3.147, NarTop10Accuracy=0.7004, over 7143.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6747, over 6005.28 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (0/8) Epoch 13, batch 1900, train_loss[loss=3.611, NarTop10Accuracy=0.6019, over 5484.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6751, over 6039.69 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,779 INFO [trainer.py:765] (0/8) Epoch 13, batch 2000, train_loss[loss=3.529, NarTop10Accuracy=0.6254, over 5853.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6788, over 5991.69 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,148 INFO [trainer.py:765] (0/8) Epoch 13, batch 2100, train_loss[loss=2.864, NarTop10Accuracy=0.7589, over 4788.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6793, over 5972.48 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (0/8) Epoch 13, batch 2200, train_loss[loss=3.385, NarTop10Accuracy=0.6444, over 7482.00 frames. ], tot_loss[loss=3.248, NarTop10Accuracy=0.6766, over 5996.03 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,543 INFO [trainer.py:765] (0/8) Epoch 13, batch 2300, train_loss[loss=3.626, NarTop10Accuracy=0.6013, over 5571.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6734, over 6031.19 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,940 INFO [trainer.py:765] (0/8) Epoch 13, batch 2400, train_loss[loss=3.568, NarTop10Accuracy=0.6098, over 5253.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6785, over 5789.09 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,409 INFO [trainer.py:765] (0/8) Epoch 13, batch 2500, train_loss[loss=3.547, NarTop10Accuracy=0.6164, over 5034.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6815, over 5483.74 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,347 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 17:21:16,350 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-13.pt +2024-08-06 17:22:19,316 INFO [trainer.py:765] (0/8) Epoch 14, batch 100, train_loss[loss=3.075, NarTop10Accuracy=0.7211, over 7191.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6835, over 2371.11 frames. ], batch size: 31, lr: 6.24e-03 +2024-08-06 17:22:50,379 INFO [trainer.py:765] (0/8) Epoch 14, batch 200, train_loss[loss=3.243, NarTop10Accuracy=0.6733, over 6819.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6799, over 3850.18 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,881 INFO [trainer.py:765] (0/8) Epoch 14, batch 300, train_loss[loss=3.097, NarTop10Accuracy=0.7019, over 7062.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6853, over 4675.03 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,485 INFO [trainer.py:765] (0/8) Epoch 14, batch 400, train_loss[loss=2.941, NarTop10Accuracy=0.7354, over 5082.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6813, over 5136.42 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,115 INFO [trainer.py:765] (0/8) Epoch 14, batch 500, train_loss[loss=3.21, NarTop10Accuracy=0.6837, over 6105.00 frames. 
], tot_loss[loss=3.231, NarTop10Accuracy=0.6797, over 5397.84 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,214 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (0/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,275 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 28949MB +2024-08-06 17:24:44,823 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,915 INFO [trainer.py:765] (0/8) Epoch 14, batch 600, train_loss[loss=3.002, NarTop10Accuracy=0.7339, over 5832.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6785, over 5648.08 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,548 INFO [trainer.py:765] (0/8) Epoch 14, batch 700, train_loss[loss=3.524, NarTop10Accuracy=0.6203, over 4995.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6814, over 5718.15 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,279 INFO [trainer.py:765] (0/8) Epoch 14, batch 800, train_loss[loss=2.8, NarTop10Accuracy=0.7669, over 5073.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6843, over 5776.56 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 17:26:57,663 INFO [trainer.py:765] (0/8) Epoch 14, batch 900, train_loss[loss=3.226, NarTop10Accuracy=0.6717, over 6231.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6838, over 5803.29 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,717 INFO [trainer.py:765] (0/8) Epoch 14, batch 1000, train_loss[loss=3.389, NarTop10Accuracy=0.6431, over 6228.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6803, over 5903.49 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,597 INFO [trainer.py:765] (0/8) Epoch 14, batch 1100, train_loss[loss=2.999, NarTop10Accuracy=0.7347, over 6900.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6814, over 5946.06 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,734 INFO [trainer.py:765] (0/8) Epoch 14, batch 1200, train_loss[loss=3.47, NarTop10Accuracy=0.6276, over 7509.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6815, over 5950.46 frames. ], batch size: 32, lr: 6.15e-03 +2024-08-06 17:29:16,214 INFO [trainer.py:765] (0/8) Epoch 14, batch 1300, train_loss[loss=3.515, NarTop10Accuracy=0.6237, over 5064.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6818, over 5985.62 frames. ], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,603 INFO [trainer.py:765] (0/8) Epoch 14, batch 1400, train_loss[loss=3.356, NarTop10Accuracy=0.6543, over 6006.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6791, over 6009.42 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (0/8) Epoch 14, batch 1500, train_loss[loss=3.739, NarTop10Accuracy=0.576, over 6477.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6774, over 5964.93 frames. ], batch size: 50, lr: 6.12e-03 +2024-08-06 17:30:53,043 INFO [trainer.py:765] (0/8) Epoch 14, batch 1600, train_loss[loss=3.009, NarTop10Accuracy=0.7205, over 7251.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6804, over 5937.09 frames. ], batch size: 23, lr: 6.11e-03 +2024-08-06 17:31:19,728 INFO [trainer.py:765] (0/8) Epoch 14, batch 1700, train_loss[loss=3.235, NarTop10Accuracy=0.6789, over 6180.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6843, over 5919.55 frames. 
], batch size: 13, lr: 6.10e-03 +2024-08-06 17:31:46,289 INFO [trainer.py:765] (0/8) Epoch 14, batch 1800, train_loss[loss=3.033, NarTop10Accuracy=0.7301, over 7149.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6883, over 5971.12 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (0/8) Epoch 14, batch 1900, train_loss[loss=3.694, NarTop10Accuracy=0.585, over 5901.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6858, over 6011.86 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,283 INFO [trainer.py:765] (0/8) Epoch 14, batch 2000, train_loss[loss=3.303, NarTop10Accuracy=0.662, over 6213.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6834, over 5985.57 frames. ], batch size: 51, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (0/8) Epoch 14, batch 2100, train_loss[loss=3.046, NarTop10Accuracy=0.7126, over 3948.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6817, over 5973.14 frames. ], batch size: 4, lr: 6.07e-03 +2024-08-06 17:33:28,999 INFO [trainer.py:765] (0/8) Epoch 14, batch 2200, train_loss[loss=3.24, NarTop10Accuracy=0.6791, over 7212.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6824, over 6011.55 frames. ], batch size: 31, lr: 6.06e-03 +2024-08-06 17:33:54,087 INFO [trainer.py:765] (0/8) Epoch 14, batch 2300, train_loss[loss=2.853, NarTop10Accuracy=0.7613, over 5661.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6792, over 6035.11 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (0/8) Epoch 14, batch 2400, train_loss[loss=3.087, NarTop10Accuracy=0.7075, over 5718.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6783, over 5783.32 frames. ], batch size: 8, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (0/8) Epoch 14, batch 2500, train_loss[loss=2.719, NarTop10Accuracy=0.7818, over 5022.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.685, over 5468.76 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,395 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (0/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 17:34:53,680 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,685 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 17:35:09,688 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-14.pt +2024-08-06 17:36:11,739 INFO [trainer.py:765] (0/8) Epoch 15, batch 100, train_loss[loss=3.001, NarTop10Accuracy=0.7215, over 7086.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6817, over 2365.35 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,335 INFO [trainer.py:765] (0/8) Epoch 15, batch 200, train_loss[loss=3.455, NarTop10Accuracy=0.6308, over 6765.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6868, over 3850.82 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,715 INFO [trainer.py:765] (0/8) Epoch 15, batch 300, train_loss[loss=3.418, NarTop10Accuracy=0.6403, over 7209.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6861, over 4661.39 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,904 INFO [trainer.py:765] (0/8) Epoch 15, batch 400, train_loss[loss=3.091, NarTop10Accuracy=0.7085, over 5091.00 frames. 
], tot_loss[loss=3.187, NarTop10Accuracy=0.6887, over 5099.78 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (0/8) Epoch 15, batch 500, train_loss[loss=2.905, NarTop10Accuracy=0.7441, over 5946.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6874, over 5379.46 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,094 INFO [trainer.py:765] (0/8) Epoch 15, batch 600, train_loss[loss=3, NarTop10Accuracy=0.7298, over 5712.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6848, over 5653.20 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (0/8) Epoch 15, batch 700, train_loss[loss=2.812, NarTop10Accuracy=0.7682, over 5070.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.684, over 5715.95 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,565 INFO [trainer.py:765] (0/8) Epoch 15, batch 800, train_loss[loss=3.469, NarTop10Accuracy=0.6243, over 4227.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6802, over 5762.62 frames. ], batch size: 5, lr: 5.76e-03 +2024-08-06 17:40:35,791 INFO [trainer.py:765] (0/8) Epoch 15, batch 900, train_loss[loss=3.392, NarTop10Accuracy=0.6436, over 6594.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6835, over 5800.27 frames. ], batch size: 14, lr: 5.76e-03 +2024-08-06 17:41:11,251 INFO [trainer.py:765] (0/8) Epoch 15, batch 1000, train_loss[loss=3.18, NarTop10Accuracy=0.6969, over 6138.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6862, over 5910.69 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 17:41:46,452 INFO [trainer.py:765] (0/8) Epoch 15, batch 1100, train_loss[loss=3.111, NarTop10Accuracy=0.7018, over 6825.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6861, over 5961.40 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (0/8) Epoch 15, batch 1200, train_loss[loss=3.394, NarTop10Accuracy=0.6501, over 7278.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6805, over 5941.87 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,428 INFO [trainer.py:765] (0/8) Epoch 15, batch 1300, train_loss[loss=3.062, NarTop10Accuracy=0.7173, over 5103.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6842, over 5995.19 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,608 INFO [trainer.py:765] (0/8) Epoch 15, batch 1400, train_loss[loss=3.447, NarTop10Accuracy=0.6329, over 6180.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6812, over 6018.80 frames. ], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (0/8) Epoch 15, batch 1500, train_loss[loss=3.126, NarTop10Accuracy=0.6993, over 6267.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6809, over 5934.90 frames. ], batch size: 51, lr: 5.71e-03 +2024-08-06 17:44:24,242 INFO [trainer.py:765] (0/8) Epoch 15, batch 1600, train_loss[loss=3.585, NarTop10Accuracy=0.6123, over 6951.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6857, over 5910.68 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (0/8) Epoch 15, batch 1700, train_loss[loss=2.926, NarTop10Accuracy=0.7407, over 6141.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6874, over 5893.57 frames. ], batch size: 13, lr: 5.70e-03 +2024-08-06 17:45:17,294 INFO [trainer.py:765] (0/8) Epoch 15, batch 1800, train_loss[loss=3.281, NarTop10Accuracy=0.6724, over 7119.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6879, over 5985.83 frames. 
], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (0/8) Epoch 15, batch 1900, train_loss[loss=3.112, NarTop10Accuracy=0.7147, over 6072.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6829, over 6032.65 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,542 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:46:01,742 INFO [trainer.py:811] (0/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,372 INFO [trainer.py:765] (0/8) Epoch 15, batch 2000, train_loss[loss=3.275, NarTop10Accuracy=0.676, over 6285.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6848, over 6001.58 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (0/8) Epoch 15, batch 2100, train_loss[loss=3.199, NarTop10Accuracy=0.6903, over 3942.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6862, over 5973.99 frames. ], batch size: 4, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (0/8) Epoch 15, batch 2200, train_loss[loss=3.069, NarTop10Accuracy=0.7179, over 7128.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6851, over 6017.02 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (0/8) Epoch 15, batch 2300, train_loss[loss=3.414, NarTop10Accuracy=0.6349, over 5559.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6847, over 6016.67 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (0/8) Epoch 15, batch 2400, train_loss[loss=3.148, NarTop10Accuracy=0.6963, over 5124.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6891, over 5778.05 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,162 INFO [trainer.py:765] (0/8) Epoch 15, batch 2500, train_loss[loss=2.822, NarTop10Accuracy=0.7628, over 5163.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6938, over 5488.94 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:41,347 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 17:48:41,350 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-15.pt +2024-08-06 17:49:41,222 INFO [trainer.py:765] (0/8) Epoch 16, batch 100, train_loss[loss=3.483, NarTop10Accuracy=0.636, over 7254.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6959, over 2364.04 frames. ], batch size: 32, lr: 5.45e-03 +2024-08-06 17:50:12,158 INFO [trainer.py:765] (0/8) Epoch 16, batch 200, train_loss[loss=2.956, NarTop10Accuracy=0.742, over 6765.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6865, over 3859.22 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,160 INFO [trainer.py:765] (0/8) Epoch 16, batch 300, train_loss[loss=3.146, NarTop10Accuracy=0.697, over 7098.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.69, over 4659.62 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,977 INFO [trainer.py:765] (0/8) Epoch 16, batch 400, train_loss[loss=3.31, NarTop10Accuracy=0.6672, over 5163.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6877, over 5123.84 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,324 INFO [trainer.py:765] (0/8) Epoch 16, batch 500, train_loss[loss=2.946, NarTop10Accuracy=0.7386, over 6189.00 frames. 
], tot_loss[loss=3.173, NarTop10Accuracy=0.6907, over 5395.53 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,252 INFO [trainer.py:765] (0/8) Epoch 16, batch 600, train_loss[loss=2.906, NarTop10Accuracy=0.7454, over 5748.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6877, over 5652.25 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,388 INFO [trainer.py:765] (0/8) Epoch 16, batch 700, train_loss[loss=2.916, NarTop10Accuracy=0.7491, over 4317.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6885, over 5705.25 frames. ], batch size: 5, lr: 5.41e-03 +2024-08-06 17:53:33,816 INFO [trainer.py:765] (0/8) Epoch 16, batch 800, train_loss[loss=3.505, NarTop10Accuracy=0.621, over 4359.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6895, over 5782.06 frames. ], batch size: 5, lr: 5.40e-03 +2024-08-06 17:54:03,924 INFO [trainer.py:765] (0/8) Epoch 16, batch 900, train_loss[loss=3.433, NarTop10Accuracy=0.6375, over 6750.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6921, over 5805.50 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:54:37,608 INFO [trainer.py:765] (0/8) Epoch 16, batch 1000, train_loss[loss=3.001, NarTop10Accuracy=0.7229, over 6210.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6944, over 5910.27 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:55:17,197 INFO [trainer.py:765] (0/8) Epoch 16, batch 1100, train_loss[loss=3.136, NarTop10Accuracy=0.6959, over 6990.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6871, over 5936.22 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,210 INFO [trainer.py:765] (0/8) Epoch 16, batch 1200, train_loss[loss=3.473, NarTop10Accuracy=0.6271, over 7092.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6865, over 5928.04 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,776 INFO [trainer.py:765] (0/8) Epoch 16, batch 1300, train_loss[loss=3.351, NarTop10Accuracy=0.6526, over 4332.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 5994.89 frames. ], batch size: 5, lr: 5.37e-03 +2024-08-06 17:56:44,649 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (0/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. +2024-08-06 17:56:53,429 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,172 INFO [trainer.py:765] (0/8) Epoch 16, batch 1400, train_loss[loss=3.13, NarTop10Accuracy=0.7066, over 6018.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6885, over 6018.44 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,034 INFO [trainer.py:765] (0/8) Epoch 16, batch 1500, train_loss[loss=3.337, NarTop10Accuracy=0.6617, over 6204.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6884, over 5957.50 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 17:58:01,775 INFO [trainer.py:765] (0/8) Epoch 16, batch 1600, train_loss[loss=3.097, NarTop10Accuracy=0.7113, over 6999.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6897, over 5927.64 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (0/8) Epoch 16, batch 1700, train_loss[loss=2.948, NarTop10Accuracy=0.7383, over 6663.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6867, over 5921.45 frames. 
], batch size: 14, lr: 5.34e-03 +2024-08-06 17:58:54,976 INFO [trainer.py:765] (0/8) Epoch 16, batch 1800, train_loss[loss=3.077, NarTop10Accuracy=0.7017, over 7164.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6896, over 5984.18 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (0/8) Epoch 16, batch 1900, train_loss[loss=3.43, NarTop10Accuracy=0.6393, over 6300.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6839, over 6025.45 frames. ], batch size: 50, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (0/8) Epoch 16, batch 2000, train_loss[loss=3.112, NarTop10Accuracy=0.7041, over 6258.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6909, over 5998.99 frames. ], batch size: 50, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (0/8) Epoch 16, batch 2100, train_loss[loss=3.589, NarTop10Accuracy=0.6167, over 4869.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6852, over 5966.16 frames. ], batch size: 5, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (0/8) Epoch 16, batch 2200, train_loss[loss=3.335, NarTop10Accuracy=0.6617, over 7338.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6832, over 5994.09 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (0/8) Epoch 16, batch 2300, train_loss[loss=2.908, NarTop10Accuracy=0.7404, over 5757.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6833, over 6017.76 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (0/8) Epoch 16, batch 2400, train_loss[loss=2.972, NarTop10Accuracy=0.7393, over 5112.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6876, over 5777.34 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (0/8) Epoch 16, batch 2500, train_loss[loss=3.062, NarTop10Accuracy=0.7069, over 5130.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6928, over 5459.22 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:11,228 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 18:02:11,233 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-16.pt +2024-08-06 18:03:08,531 INFO [trainer.py:765] (0/8) Epoch 17, batch 100, train_loss[loss=3.107, NarTop10Accuracy=0.7048, over 7176.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6986, over 2371.95 frames. ], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,146 INFO [trainer.py:765] (0/8) Epoch 17, batch 200, train_loss[loss=3.44, NarTop10Accuracy=0.6419, over 6897.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6971, over 3859.46 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,591 INFO [trainer.py:765] (0/8) Epoch 17, batch 300, train_loss[loss=3.26, NarTop10Accuracy=0.6687, over 7083.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6927, over 4651.45 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (0/8) Epoch 17, batch 400, train_loss[loss=3.361, NarTop10Accuracy=0.6498, over 5007.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6929, over 5081.18 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,681 INFO [trainer.py:765] (0/8) Epoch 17, batch 500, train_loss[loss=2.869, NarTop10Accuracy=0.7504, over 6006.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6956, over 5371.02 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,740 INFO [trainer.py:765] (0/8) Epoch 17, batch 600, train_loss[loss=3.145, NarTop10Accuracy=0.7011, over 5781.00 frames. 
], tot_loss[loss=3.169, NarTop10Accuracy=0.6923, over 5647.89 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,476 INFO [trainer.py:765] (0/8) Epoch 17, batch 700, train_loss[loss=3.03, NarTop10Accuracy=0.72, over 4950.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6926, over 5716.50 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,726 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (0/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,764 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,354 INFO [trainer.py:765] (0/8) Epoch 17, batch 800, train_loss[loss=3.117, NarTop10Accuracy=0.6991, over 5052.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6901, over 5775.79 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,722 INFO [trainer.py:765] (0/8) Epoch 17, batch 900, train_loss[loss=3.445, NarTop10Accuracy=0.6338, over 6261.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6946, over 5787.70 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (0/8) Epoch 17, batch 1000, train_loss[loss=3.29, NarTop10Accuracy=0.6699, over 6660.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6927, over 5893.70 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:09:03,107 INFO [trainer.py:765] (0/8) Epoch 17, batch 1100, train_loss[loss=2.899, NarTop10Accuracy=0.747, over 7047.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6909, over 5930.68 frames. ], batch size: 18, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (0/8) Epoch 17, batch 1200, train_loss[loss=3.099, NarTop10Accuracy=0.709, over 7302.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6916, over 5925.34 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,689 INFO [trainer.py:765] (0/8) Epoch 17, batch 1300, train_loss[loss=3.084, NarTop10Accuracy=0.7013, over 5244.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6916, over 5994.39 frames. ], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,027 INFO [trainer.py:765] (0/8) Epoch 17, batch 1400, train_loss[loss=3.331, NarTop10Accuracy=0.6575, over 6144.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6893, over 6008.24 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,106 INFO [trainer.py:765] (0/8) Epoch 17, batch 1500, train_loss[loss=3.521, NarTop10Accuracy=0.6243, over 6057.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6913, over 5971.59 frames. ], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (0/8) Epoch 17, batch 1600, train_loss[loss=3.096, NarTop10Accuracy=0.7115, over 7062.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6944, over 5934.37 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,509 INFO [trainer.py:765] (0/8) Epoch 17, batch 1700, train_loss[loss=3.595, NarTop10Accuracy=0.6023, over 6174.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6909, over 5912.50 frames. ], batch size: 13, lr: 5.03e-03 +2024-08-06 18:12:40,002 INFO [trainer.py:765] (0/8) Epoch 17, batch 1800, train_loss[loss=2.894, NarTop10Accuracy=0.7465, over 7209.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6896, over 5974.11 frames. 
], batch size: 23, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (0/8) Epoch 17, batch 1900, train_loss[loss=3.074, NarTop10Accuracy=0.7157, over 6939.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6865, over 6031.39 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (0/8) Epoch 17, batch 2000, train_loss[loss=3.543, NarTop10Accuracy=0.6241, over 6264.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5998.19 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:57,229 INFO [trainer.py:765] (0/8) Epoch 17, batch 2100, train_loss[loss=2.858, NarTop10Accuracy=0.7498, over 3933.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6906, over 5974.67 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,435 INFO [trainer.py:765] (0/8) Epoch 17, batch 2200, train_loss[loss=2.983, NarTop10Accuracy=0.7339, over 7374.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6869, over 6007.88 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (0/8) Epoch 17, batch 2300, train_loss[loss=2.966, NarTop10Accuracy=0.7362, over 5673.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6887, over 6026.88 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (0/8) Epoch 17, batch 2400, train_loss[loss=2.834, NarTop10Accuracy=0.7567, over 5094.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6891, over 5781.03 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (0/8) Epoch 17, batch 2500, train_loss[loss=2.984, NarTop10Accuracy=0.7275, over 5166.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6915, over 5475.78 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:55,790 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 18:15:55,795 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-17.pt +2024-08-06 18:16:49,908 INFO [trainer.py:765] (0/8) Epoch 18, batch 100, train_loss[loss=2.982, NarTop10Accuracy=0.7334, over 7287.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6917, over 2363.43 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (0/8) Epoch 18, batch 200, train_loss[loss=2.958, NarTop10Accuracy=0.742, over 6777.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6931, over 3857.58 frames. ], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,715 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 18:17:35,927 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,928 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 18:17:36,528 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (0/8) Epoch 18, batch 300, train_loss[loss=3.497, NarTop10Accuracy=0.6309, over 7188.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6937, over 4661.37 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (0/8) Epoch 18, batch 400, train_loss[loss=3.36, NarTop10Accuracy=0.6571, over 5130.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6956, over 5100.37 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (0/8) Epoch 18, batch 500, train_loss[loss=3.259, NarTop10Accuracy=0.6812, over 6081.00 frames. 
], tot_loss[loss=3.151, NarTop10Accuracy=0.6955, over 5398.59 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (0/8) Epoch 18, batch 600, train_loss[loss=3.272, NarTop10Accuracy=0.6705, over 5670.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6951, over 5662.72 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,869 INFO [trainer.py:765] (0/8) Epoch 18, batch 700, train_loss[loss=3.371, NarTop10Accuracy=0.6585, over 5010.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6941, over 5737.39 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (0/8) Epoch 18, batch 800, train_loss[loss=2.648, NarTop10Accuracy=0.7882, over 4251.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6934, over 5790.69 frames. ], batch size: 5, lr: 4.79e-03 +2024-08-06 18:21:32,408 INFO [trainer.py:765] (0/8) Epoch 18, batch 900, train_loss[loss=3.035, NarTop10Accuracy=0.7194, over 6681.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6966, over 5806.67 frames. ], batch size: 14, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (0/8) Epoch 18, batch 1000, train_loss[loss=2.987, NarTop10Accuracy=0.7268, over 6237.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6928, over 5909.81 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (0/8) Epoch 18, batch 1100, train_loss[loss=3.373, NarTop10Accuracy=0.6557, over 6801.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6942, over 5932.48 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,604 INFO [trainer.py:765] (0/8) Epoch 18, batch 1200, train_loss[loss=3.535, NarTop10Accuracy=0.6123, over 7419.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6905, over 5934.77 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (0/8) Epoch 18, batch 1300, train_loss[loss=3.231, NarTop10Accuracy=0.6853, over 5112.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6941, over 6018.75 frames. ], batch size: 6, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (0/8) Epoch 18, batch 1400, train_loss[loss=3.052, NarTop10Accuracy=0.7142, over 6051.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6946, over 6013.57 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (0/8) Epoch 18, batch 1500, train_loss[loss=3.143, NarTop10Accuracy=0.7065, over 6234.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6966, over 5944.61 frames. ], batch size: 51, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (0/8) Epoch 18, batch 1600, train_loss[loss=3.033, NarTop10Accuracy=0.7189, over 7125.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6944, over 5922.43 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,687 INFO [trainer.py:765] (0/8) Epoch 18, batch 1700, train_loss[loss=3.13, NarTop10Accuracy=0.6891, over 6288.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6936, over 5908.33 frames. ], batch size: 13, lr: 4.75e-03 +2024-08-06 18:26:21,196 INFO [trainer.py:765] (0/8) Epoch 18, batch 1800, train_loss[loss=3.554, NarTop10Accuracy=0.6131, over 6999.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6947, over 5980.28 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,566 INFO [trainer.py:765] (0/8) Epoch 18, batch 1900, train_loss[loss=3.023, NarTop10Accuracy=0.725, over 5964.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6915, over 6015.40 frames. 
], batch size: 50, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (0/8) Epoch 18, batch 2000, train_loss[loss=3.136, NarTop10Accuracy=0.7086, over 5664.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.694, over 6006.64 frames. ], batch size: 51, lr: 4.73e-03 +2024-08-06 18:27:38,528 INFO [trainer.py:765] (0/8) Epoch 18, batch 2100, train_loss[loss=3.302, NarTop10Accuracy=0.6638, over 3990.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6942, over 5979.73 frames. ], batch size: 4, lr: 4.73e-03 +2024-08-06 18:28:03,811 INFO [trainer.py:765] (0/8) Epoch 18, batch 2200, train_loss[loss=3.095, NarTop10Accuracy=0.7078, over 7290.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.694, over 6009.81 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 18:28:06,570 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,097 INFO [trainer.py:765] (0/8) Epoch 18, batch 2300, train_loss[loss=2.902, NarTop10Accuracy=0.7491, over 5712.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6908, over 6013.15 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,593 INFO [trainer.py:765] (0/8) Epoch 18, batch 2400, train_loss[loss=2.812, NarTop10Accuracy=0.7603, over 5217.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6956, over 5776.49 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,028 INFO [trainer.py:765] (0/8) Epoch 18, batch 2500, train_loss[loss=2.93, NarTop10Accuracy=0.7465, over 5658.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7001, over 5483.81 frames. ], batch size: 8, lr: 4.71e-03 +2024-08-06 18:29:45,449 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 18:29:45,453 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-18.pt +2024-08-06 18:30:41,231 INFO [trainer.py:765] (0/8) Epoch 19, batch 100, train_loss[loss=2.951, NarTop10Accuracy=0.7314, over 7236.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6932, over 2364.78 frames. ], batch size: 31, lr: 4.57e-03 +2024-08-06 18:31:15,602 INFO [trainer.py:765] (0/8) Epoch 19, batch 200, train_loss[loss=3.001, NarTop10Accuracy=0.726, over 6852.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6956, over 3863.82 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (0/8) Epoch 19, batch 300, train_loss[loss=3.46, NarTop10Accuracy=0.6228, over 7287.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6984, over 4669.20 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (0/8) Epoch 19, batch 400, train_loss[loss=3.167, NarTop10Accuracy=0.6891, over 5181.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6984, over 5115.75 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (0/8) Epoch 19, batch 500, train_loss[loss=2.915, NarTop10Accuracy=0.7288, over 6084.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.698, over 5381.05 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (0/8) Epoch 19, batch 600, train_loss[loss=2.934, NarTop10Accuracy=0.739, over 5718.00 frames. 
], tot_loss[loss=3.145, NarTop10Accuracy=0.6967, over 5657.15 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,591 INFO [trainer.py:765] (0/8) Epoch 19, batch 700, train_loss[loss=3.029, NarTop10Accuracy=0.7277, over 5022.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6956, over 5712.90 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (0/8) Epoch 19, batch 800, train_loss[loss=3.31, NarTop10Accuracy=0.6664, over 5106.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6949, over 5771.82 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (0/8) Epoch 19, batch 900, train_loss[loss=2.845, NarTop10Accuracy=0.752, over 6180.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6975, over 5800.61 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (0/8) Epoch 19, batch 1000, train_loss[loss=3.449, NarTop10Accuracy=0.6276, over 6219.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6977, over 5903.86 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,938 INFO [trainer.py:765] (0/8) Epoch 19, batch 1100, train_loss[loss=2.986, NarTop10Accuracy=0.734, over 6816.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6952, over 5949.82 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (0/8) Epoch 19, batch 1200, train_loss[loss=3.055, NarTop10Accuracy=0.7205, over 7224.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.692, over 5938.06 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (0/8) Epoch 19, batch 1300, train_loss[loss=2.974, NarTop10Accuracy=0.7273, over 4980.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6923, over 5993.88 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,679 INFO [trainer.py:765] (0/8) Epoch 19, batch 1400, train_loss[loss=3.054, NarTop10Accuracy=0.7176, over 6147.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6919, over 6025.52 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (0/8) Epoch 19, batch 1500, train_loss[loss=3.432, NarTop10Accuracy=0.641, over 6135.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6956, over 5967.31 frames. ], batch size: 53, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (0/8) Epoch 19, batch 1600, train_loss[loss=3.485, NarTop10Accuracy=0.6285, over 7185.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6969, over 5942.89 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,589 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (0/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,795 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,192 INFO [trainer.py:765] (0/8) Epoch 19, batch 1700, train_loss[loss=3.485, NarTop10Accuracy=0.6302, over 6240.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6964, over 5922.00 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (0/8) Epoch 19, batch 1800, train_loss[loss=3.538, NarTop10Accuracy=0.617, over 7275.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5984.17 frames. 
], batch size: 23, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (0/8) Epoch 19, batch 1900, train_loss[loss=3.096, NarTop10Accuracy=0.7076, over 5937.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6965, over 6011.84 frames. ], batch size: 50, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (0/8) Epoch 19, batch 2000, train_loss[loss=3.303, NarTop10Accuracy=0.6625, over 6414.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6964, over 5986.55 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (0/8) Epoch 19, batch 2100, train_loss[loss=3.075, NarTop10Accuracy=0.7175, over 3891.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.6984, over 5966.23 frames. ], batch size: 4, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (0/8) Epoch 19, batch 2200, train_loss[loss=3.117, NarTop10Accuracy=0.6987, over 7449.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5995.22 frames. ], batch size: 33, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (0/8) Epoch 19, batch 2300, train_loss[loss=3.109, NarTop10Accuracy=0.6974, over 5631.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6939, over 6011.12 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,987 INFO [trainer.py:765] (0/8) Epoch 19, batch 2400, train_loss[loss=2.936, NarTop10Accuracy=0.7396, over 5184.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6967, over 5787.86 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (0/8) Epoch 19, batch 2500, train_loss[loss=2.78, NarTop10Accuracy=0.7644, over 5244.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6994, over 5470.51 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,776 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 18:43:19,779 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-19.pt +2024-08-06 18:44:22,974 INFO [trainer.py:765] (0/8) Epoch 20, batch 100, train_loss[loss=3.325, NarTop10Accuracy=0.6555, over 7494.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 2374.45 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (0/8) Epoch 20, batch 200, train_loss[loss=3.37, NarTop10Accuracy=0.6608, over 6822.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6996, over 3850.26 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,279 INFO [trainer.py:765] (0/8) Epoch 20, batch 300, train_loss[loss=3.455, NarTop10Accuracy=0.6299, over 7338.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7018, over 4667.75 frames. ], batch size: 23, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (0/8) Epoch 20, batch 400, train_loss[loss=2.753, NarTop10Accuracy=0.7702, over 5148.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7026, over 5114.08 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,770 INFO [trainer.py:765] (0/8) Epoch 20, batch 500, train_loss[loss=2.818, NarTop10Accuracy=0.7584, over 6183.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7009, over 5379.38 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (0/8) Epoch 20, batch 600, train_loss[loss=3.077, NarTop10Accuracy=0.702, over 5787.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7014, over 5638.99 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,482 INFO [trainer.py:765] (0/8) Epoch 20, batch 700, train_loss[loss=2.742, NarTop10Accuracy=0.7766, over 4998.00 frames. 
], tot_loss[loss=3.111, NarTop10Accuracy=0.704, over 5705.02 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (0/8) Epoch 20, batch 800, train_loss[loss=2.753, NarTop10Accuracy=0.7758, over 4305.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7002, over 5766.57 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:56,535 INFO [trainer.py:765] (0/8) Epoch 20, batch 900, train_loss[loss=2.914, NarTop10Accuracy=0.7482, over 6681.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7007, over 5788.27 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (0/8) Epoch 20, batch 1000, train_loss[loss=3.133, NarTop10Accuracy=0.6954, over 6756.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6952, over 5885.12 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,237 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 18:50:00,326 INFO [trainer.py:811] (0/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,428 INFO [trainer.py:765] (0/8) Epoch 20, batch 1100, train_loss[loss=3.149, NarTop10Accuracy=0.6895, over 7005.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6972, over 5922.15 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (0/8) Epoch 20, batch 1200, train_loss[loss=3.085, NarTop10Accuracy=0.7123, over 7068.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6963, over 5924.01 frames. ], batch size: 31, lr: 4.29e-03 +2024-08-06 18:51:25,130 INFO [trainer.py:765] (0/8) Epoch 20, batch 1300, train_loss[loss=3.232, NarTop10Accuracy=0.6779, over 5085.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6965, over 6003.78 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,315 INFO [trainer.py:765] (0/8) Epoch 20, batch 1400, train_loss[loss=2.93, NarTop10Accuracy=0.7325, over 5955.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6993, over 6020.72 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,806 INFO [trainer.py:765] (0/8) Epoch 20, batch 1500, train_loss[loss=3.341, NarTop10Accuracy=0.6577, over 6567.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6971, over 5965.48 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (0/8) Epoch 20, batch 1600, train_loss[loss=2.921, NarTop10Accuracy=0.7344, over 7455.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6957, over 5947.51 frames. ], batch size: 23, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (0/8) Epoch 20, batch 1700, train_loss[loss=3.523, NarTop10Accuracy=0.615, over 6288.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6961, over 5937.10 frames. ], batch size: 13, lr: 4.27e-03 +2024-08-06 18:53:53,851 INFO [trainer.py:765] (0/8) Epoch 20, batch 1800, train_loss[loss=3.113, NarTop10Accuracy=0.6993, over 7227.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.699, over 5999.40 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (0/8) Epoch 20, batch 1900, train_loss[loss=3.105, NarTop10Accuracy=0.7113, over 5943.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6935, over 6035.14 frames. 
], batch size: 50, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (0/8) Epoch 20, batch 2000, train_loss[loss=3.606, NarTop10Accuracy=0.5992, over 6099.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6932, over 6003.14 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:55:11,183 INFO [trainer.py:765] (0/8) Epoch 20, batch 2100, train_loss[loss=3.428, NarTop10Accuracy=0.6317, over 4725.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6936, over 5967.21 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 18:55:36,415 INFO [trainer.py:765] (0/8) Epoch 20, batch 2200, train_loss[loss=2.939, NarTop10Accuracy=0.7404, over 7365.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6939, over 6015.14 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,636 INFO [trainer.py:765] (0/8) Epoch 20, batch 2300, train_loss[loss=3.199, NarTop10Accuracy=0.6775, over 5595.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6925, over 6038.35 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,050 INFO [trainer.py:765] (0/8) Epoch 20, batch 2400, train_loss[loss=2.885, NarTop10Accuracy=0.7565, over 5094.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6951, over 5772.83 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (0/8) Epoch 20, batch 2500, train_loss[loss=2.936, NarTop10Accuracy=0.7448, over 5118.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7019, over 5469.27 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,280 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 18:57:09,284 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-20.pt +2024-08-06 18:58:09,585 INFO [trainer.py:765] (0/8) Epoch 21, batch 100, train_loss[loss=3.083, NarTop10Accuracy=0.7084, over 7230.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7039, over 2380.81 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,417 INFO [trainer.py:765] (0/8) Epoch 21, batch 200, train_loss[loss=2.868, NarTop10Accuracy=0.7501, over 6966.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.7, over 3874.31 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (0/8) Epoch 21, batch 300, train_loss[loss=2.873, NarTop10Accuracy=0.7475, over 7107.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6991, over 4680.79 frames. ], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (0/8) Epoch 21, batch 400, train_loss[loss=2.859, NarTop10Accuracy=0.7488, over 5289.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7027, over 5100.97 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,840 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (0/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,890 INFO [trainer.py:765] (0/8) Epoch 21, batch 500, train_loss[loss=2.895, NarTop10Accuracy=0.747, over 5961.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.703, over 5369.01 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,329 INFO [trainer.py:765] (0/8) Epoch 21, batch 600, train_loss[loss=3.431, NarTop10Accuracy=0.6379, over 5709.00 frames. 
], tot_loss[loss=3.104, NarTop10Accuracy=0.7053, over 5646.07 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (0/8) Epoch 21, batch 700, train_loss[loss=2.807, NarTop10Accuracy=0.7708, over 5163.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7026, over 5708.51 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (0/8) Epoch 21, batch 800, train_loss[loss=2.956, NarTop10Accuracy=0.7246, over 5166.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7004, over 5771.30 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (0/8) Epoch 21, batch 900, train_loss[loss=2.977, NarTop10Accuracy=0.7316, over 6678.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7008, over 5799.40 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (0/8) Epoch 21, batch 1000, train_loss[loss=3.005, NarTop10Accuracy=0.7235, over 6312.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6995, over 5903.12 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:04:07,206 INFO [trainer.py:765] (0/8) Epoch 21, batch 1100, train_loss[loss=3.434, NarTop10Accuracy=0.6399, over 6723.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6962, over 5932.78 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,462 INFO [trainer.py:765] (0/8) Epoch 21, batch 1200, train_loss[loss=3.292, NarTop10Accuracy=0.6671, over 7023.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6998, over 5937.17 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (0/8) Epoch 21, batch 1300, train_loss[loss=2.962, NarTop10Accuracy=0.7338, over 5046.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7034, over 5998.40 frames. ], batch size: 6, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (0/8) Epoch 21, batch 1400, train_loss[loss=3.536, NarTop10Accuracy=0.6178, over 6081.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7028, over 6025.23 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,599 INFO [trainer.py:765] (0/8) Epoch 21, batch 1500, train_loss[loss=3.36, NarTop10Accuracy=0.6572, over 5397.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6991, over 5953.59 frames. ], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,461 INFO [trainer.py:765] (0/8) Epoch 21, batch 1600, train_loss[loss=2.929, NarTop10Accuracy=0.7384, over 7104.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7001, over 5939.40 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,211 INFO [trainer.py:765] (0/8) Epoch 21, batch 1700, train_loss[loss=3.258, NarTop10Accuracy=0.6826, over 6306.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6984, over 5903.55 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (0/8) Epoch 21, batch 1800, train_loss[loss=2.873, NarTop10Accuracy=0.7524, over 6984.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6984, over 5986.30 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (0/8) Epoch 21, batch 1900, train_loss[loss=3.637, NarTop10Accuracy=0.5894, over 6534.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.696, over 6035.72 frames. ], batch size: 50, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (0/8) Epoch 21, batch 2000, train_loss[loss=3.503, NarTop10Accuracy=0.6335, over 6084.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6972, over 6000.16 frames. 
], batch size: 50, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (0/8) Epoch 21, batch 2100, train_loss[loss=2.897, NarTop10Accuracy=0.7413, over 4929.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6968, over 5971.51 frames. ], batch size: 5, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (0/8) Epoch 21, batch 2200, train_loss[loss=3.032, NarTop10Accuracy=0.7198, over 7413.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 6007.56 frames. ], batch size: 31, lr: 4.04e-03 +2024-08-06 19:09:53,222 INFO [trainer.py:765] (0/8) Epoch 21, batch 2300, train_loss[loss=3.212, NarTop10Accuracy=0.6867, over 5661.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.692, over 6018.09 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,596 INFO [trainer.py:765] (0/8) Epoch 21, batch 2400, train_loss[loss=3.356, NarTop10Accuracy=0.6497, over 5169.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6976, over 5774.84 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,230 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (0/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,276 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (0/8) Epoch 21, batch 2500, train_loss[loss=3.321, NarTop10Accuracy=0.6576, over 5130.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 5480.44 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:09,071 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 19:11:09,073 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-21.pt +2024-08-06 19:12:09,054 INFO [trainer.py:765] (0/8) Epoch 22, batch 100, train_loss[loss=2.914, NarTop10Accuracy=0.7461, over 7416.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 2360.88 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (0/8) Epoch 22, batch 200, train_loss[loss=3.218, NarTop10Accuracy=0.6775, over 6882.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.705, over 3852.07 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (0/8) Epoch 22, batch 300, train_loss[loss=2.869, NarTop10Accuracy=0.7556, over 6999.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7063, over 4668.34 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (0/8) Epoch 22, batch 400, train_loss[loss=2.816, NarTop10Accuracy=0.7637, over 5064.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7081, over 5124.43 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (0/8) Epoch 22, batch 500, train_loss[loss=3.19, NarTop10Accuracy=0.69, over 6030.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.708, over 5395.45 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,702 INFO [trainer.py:765] (0/8) Epoch 22, batch 600, train_loss[loss=3.215, NarTop10Accuracy=0.6948, over 5730.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7013, over 5631.75 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (0/8) Epoch 22, batch 700, train_loss[loss=3.479, NarTop10Accuracy=0.6266, over 4242.00 frames. 
], tot_loss[loss=3.127, NarTop10Accuracy=0.7003, over 5706.08 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:10,664 INFO [trainer.py:765] (0/8) Epoch 22, batch 800, train_loss[loss=3.04, NarTop10Accuracy=0.7249, over 5076.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7016, over 5783.61 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (0/8) Epoch 22, batch 900, train_loss[loss=2.925, NarTop10Accuracy=0.7333, over 6261.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7018, over 5806.92 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:16,434 INFO [trainer.py:765] (0/8) Epoch 22, batch 1000, train_loss[loss=3.049, NarTop10Accuracy=0.7137, over 6126.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 5909.67 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (0/8) Epoch 22, batch 1100, train_loss[loss=3.045, NarTop10Accuracy=0.7118, over 7164.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7014, over 5929.03 frames. ], batch size: 18, lr: 3.90e-03 +2024-08-06 19:18:25,926 INFO [trainer.py:765] (0/8) Epoch 22, batch 1200, train_loss[loss=2.991, NarTop10Accuracy=0.7268, over 7362.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7055, over 5946.35 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (0/8) Epoch 22, batch 1300, train_loss[loss=2.806, NarTop10Accuracy=0.7583, over 5127.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7064, over 6018.84 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (0/8) Epoch 22, batch 1400, train_loss[loss=2.761, NarTop10Accuracy=0.7796, over 5955.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7033, over 6032.79 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (0/8) Epoch 22, batch 1500, train_loss[loss=3.495, NarTop10Accuracy=0.6285, over 6045.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7035, over 5962.59 frames. ], batch size: 53, lr: 3.88e-03 +2024-08-06 19:20:31,647 INFO [trainer.py:765] (0/8) Epoch 22, batch 1600, train_loss[loss=3.15, NarTop10Accuracy=0.6979, over 6978.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6993, over 5939.92 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (0/8) Epoch 22, batch 1700, train_loss[loss=3.131, NarTop10Accuracy=0.6916, over 6261.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6996, over 5926.41 frames. ], batch size: 13, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (0/8) Epoch 22, batch 1800, train_loss[loss=3.056, NarTop10Accuracy=0.7131, over 7137.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6998, over 5993.25 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (0/8) Epoch 22, batch 1900, train_loss[loss=3.057, NarTop10Accuracy=0.7168, over 6276.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6951, over 6038.33 frames. ], batch size: 52, lr: 3.87e-03 +2024-08-06 19:21:53,110 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (0/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. 
+2024-08-06 19:22:01,089 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,818 INFO [trainer.py:765] (0/8) Epoch 22, batch 2000, train_loss[loss=3.566, NarTop10Accuracy=0.6135, over 6039.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7002, over 6006.04 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,040 INFO [trainer.py:765] (0/8) Epoch 22, batch 2100, train_loss[loss=3.426, NarTop10Accuracy=0.6339, over 4908.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7021, over 5985.07 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 19:23:15,229 INFO [trainer.py:765] (0/8) Epoch 22, batch 2200, train_loss[loss=3.091, NarTop10Accuracy=0.7141, over 7050.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7031, over 6018.52 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,314 INFO [trainer.py:765] (0/8) Epoch 22, batch 2300, train_loss[loss=3.222, NarTop10Accuracy=0.6792, over 5655.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6993, over 6046.07 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,601 INFO [trainer.py:765] (0/8) Epoch 22, batch 2400, train_loss[loss=3.011, NarTop10Accuracy=0.7226, over 5346.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7024, over 5790.91 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (0/8) Epoch 22, batch 2500, train_loss[loss=3.162, NarTop10Accuracy=0.6975, over 5244.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.705, over 5482.98 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,604 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 19:24:47,607 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-22.pt +2024-08-06 19:25:45,385 INFO [trainer.py:765] (0/8) Epoch 23, batch 100, train_loss[loss=3.048, NarTop10Accuracy=0.7266, over 6879.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7015, over 2367.69 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (0/8) Epoch 23, batch 200, train_loss[loss=3.457, NarTop10Accuracy=0.6366, over 6573.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6997, over 3855.86 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (0/8) Epoch 23, batch 300, train_loss[loss=2.893, NarTop10Accuracy=0.7498, over 7248.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7051, over 4650.89 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,540 INFO [trainer.py:765] (0/8) Epoch 23, batch 400, train_loss[loss=3.173, NarTop10Accuracy=0.6811, over 5037.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.703, over 5091.99 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,713 INFO [trainer.py:765] (0/8) Epoch 23, batch 500, train_loss[loss=3.371, NarTop10Accuracy=0.6512, over 6243.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7017, over 5365.62 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (0/8) Epoch 23, batch 600, train_loss[loss=3.262, NarTop10Accuracy=0.6714, over 5706.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7037, over 5640.50 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (0/8) Epoch 23, batch 700, train_loss[loss=3.101, NarTop10Accuracy=0.7063, over 5130.00 frames. 
], tot_loss[loss=3.098, NarTop10Accuracy=0.7063, over 5725.55 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (0/8) Epoch 23, batch 800, train_loss[loss=2.964, NarTop10Accuracy=0.7444, over 4314.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7049, over 5777.89 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (0/8) Epoch 23, batch 900, train_loss[loss=3.261, NarTop10Accuracy=0.6655, over 6564.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7071, over 5803.39 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (0/8) Epoch 23, batch 1000, train_loss[loss=2.97, NarTop10Accuracy=0.7348, over 6207.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7075, over 5893.95 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,521 INFO [trainer.py:765] (0/8) Epoch 23, batch 1100, train_loss[loss=3.053, NarTop10Accuracy=0.7173, over 6588.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7069, over 5923.62 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (0/8) Epoch 23, batch 1200, train_loss[loss=3.019, NarTop10Accuracy=0.724, over 7374.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.705, over 5919.75 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (0/8) Epoch 23, batch 1300, train_loss[loss=3.162, NarTop10Accuracy=0.6922, over 5073.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7045, over 5992.69 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (0/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:33:05,263 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,410 INFO [trainer.py:765] (0/8) Epoch 23, batch 1400, train_loss[loss=2.872, NarTop10Accuracy=0.7613, over 6153.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 6022.29 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,216 INFO [trainer.py:765] (0/8) Epoch 23, batch 1500, train_loss[loss=3.338, NarTop10Accuracy=0.657, over 6066.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7058, over 5942.79 frames. ], batch size: 51, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (0/8) Epoch 23, batch 1600, train_loss[loss=2.802, NarTop10Accuracy=0.7616, over 6864.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7052, over 5938.90 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (0/8) Epoch 23, batch 1700, train_loss[loss=3.398, NarTop10Accuracy=0.6497, over 6114.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7003, over 5905.78 frames. ], batch size: 13, lr: 3.71e-03 +2024-08-06 19:35:19,262 INFO [trainer.py:765] (0/8) Epoch 23, batch 1800, train_loss[loss=3.007, NarTop10Accuracy=0.7272, over 7020.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7026, over 5965.70 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,596 INFO [trainer.py:765] (0/8) Epoch 23, batch 1900, train_loss[loss=3.386, NarTop10Accuracy=0.6504, over 6378.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6996, over 6017.65 frames. 
], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,171 INFO [trainer.py:765] (0/8) Epoch 23, batch 2000, train_loss[loss=3.577, NarTop10Accuracy=0.6125, over 5976.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.703, over 5996.30 frames. ], batch size: 51, lr: 3.70e-03 +2024-08-06 19:36:36,518 INFO [trainer.py:765] (0/8) Epoch 23, batch 2100, train_loss[loss=3.465, NarTop10Accuracy=0.622, over 4773.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.702, over 5968.14 frames. ], batch size: 5, lr: 3.69e-03 +2024-08-06 19:37:01,909 INFO [trainer.py:765] (0/8) Epoch 23, batch 2200, train_loss[loss=3.109, NarTop10Accuracy=0.701, over 6960.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7002, over 5990.37 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,061 INFO [trainer.py:765] (0/8) Epoch 23, batch 2300, train_loss[loss=2.809, NarTop10Accuracy=0.7649, over 5742.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7009, over 6012.09 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,424 INFO [trainer.py:765] (0/8) Epoch 23, batch 2400, train_loss[loss=3.113, NarTop10Accuracy=0.7019, over 5127.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7017, over 5768.10 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,053 INFO [trainer.py:765] (0/8) Epoch 23, batch 2500, train_loss[loss=3.322, NarTop10Accuracy=0.6559, over 5184.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7065, over 5482.83 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,103 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 19:38:35,106 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-23.pt +2024-08-06 19:39:37,632 INFO [trainer.py:765] (0/8) Epoch 24, batch 100, train_loss[loss=3.448, NarTop10Accuracy=0.6339, over 7455.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7007, over 2360.94 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,190 INFO [trainer.py:765] (0/8) Epoch 24, batch 200, train_loss[loss=2.769, NarTop10Accuracy=0.7747, over 6624.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7066, over 3857.77 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,556 INFO [trainer.py:765] (0/8) Epoch 24, batch 300, train_loss[loss=2.79, NarTop10Accuracy=0.765, over 6885.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7075, over 4665.05 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,234 INFO [trainer.py:765] (0/8) Epoch 24, batch 400, train_loss[loss=2.934, NarTop10Accuracy=0.734, over 5631.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7065, over 5124.13 frames. ], batch size: 8, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (0/8) Epoch 24, batch 500, train_loss[loss=2.99, NarTop10Accuracy=0.7414, over 5985.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 5382.61 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,452 INFO [trainer.py:765] (0/8) Epoch 24, batch 600, train_loss[loss=2.799, NarTop10Accuracy=0.7634, over 5763.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 5647.36 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (0/8) Epoch 24, batch 700, train_loss[loss=2.8, NarTop10Accuracy=0.7627, over 5151.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7074, over 5718.92 frames. 
], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,381 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (0/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:43:28,561 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,815 INFO [trainer.py:765] (0/8) Epoch 24, batch 800, train_loss[loss=2.789, NarTop10Accuracy=0.7694, over 5118.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7083, over 5780.60 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,410 INFO [trainer.py:765] (0/8) Epoch 24, batch 900, train_loss[loss=2.803, NarTop10Accuracy=0.7739, over 6261.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7088, over 5781.55 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,490 INFO [trainer.py:765] (0/8) Epoch 24, batch 1000, train_loss[loss=3.141, NarTop10Accuracy=0.6996, over 6162.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7062, over 5892.96 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:45:27,108 INFO [trainer.py:765] (0/8) Epoch 24, batch 1100, train_loss[loss=3.3, NarTop10Accuracy=0.6566, over 6900.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7031, over 5933.46 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,438 INFO [trainer.py:765] (0/8) Epoch 24, batch 1200, train_loss[loss=3.095, NarTop10Accuracy=0.7116, over 7158.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 5923.83 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,295 INFO [trainer.py:765] (0/8) Epoch 24, batch 1300, train_loss[loss=3.184, NarTop10Accuracy=0.6852, over 5133.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7052, over 5991.82 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,860 INFO [trainer.py:765] (0/8) Epoch 24, batch 1400, train_loss[loss=3.15, NarTop10Accuracy=0.6961, over 5997.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7029, over 6009.56 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,958 INFO [trainer.py:765] (0/8) Epoch 24, batch 1500, train_loss[loss=3.417, NarTop10Accuracy=0.6465, over 6282.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7004, over 5961.78 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (0/8) Epoch 24, batch 1600, train_loss[loss=3.439, NarTop10Accuracy=0.6335, over 7260.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6996, over 5938.73 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,267 INFO [trainer.py:765] (0/8) Epoch 24, batch 1700, train_loss[loss=2.786, NarTop10Accuracy=0.7659, over 6204.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7004, over 5922.16 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 19:49:01,638 INFO [trainer.py:765] (0/8) Epoch 24, batch 1800, train_loss[loss=2.863, NarTop10Accuracy=0.7494, over 7203.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6995, over 5965.89 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,042 INFO [trainer.py:765] (0/8) Epoch 24, batch 1900, train_loss[loss=3.463, NarTop10Accuracy=0.6291, over 6162.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6975, over 6012.45 frames. 
], batch size: 50, lr: 3.55e-03 +2024-08-06 19:49:53,534 INFO [trainer.py:765] (0/8) Epoch 24, batch 2000, train_loss[loss=3.595, NarTop10Accuracy=0.6035, over 5871.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7027, over 5992.16 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 19:50:18,821 INFO [trainer.py:765] (0/8) Epoch 24, batch 2100, train_loss[loss=2.704, NarTop10Accuracy=0.7788, over 4038.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7027, over 5965.35 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (0/8) Epoch 24, batch 2200, train_loss[loss=3.457, NarTop10Accuracy=0.6208, over 7101.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.703, over 6004.67 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (0/8) Epoch 24, batch 2300, train_loss[loss=2.843, NarTop10Accuracy=0.7489, over 5772.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7039, over 6024.40 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,349 INFO [trainer.py:765] (0/8) Epoch 24, batch 2400, train_loss[loss=3.022, NarTop10Accuracy=0.7153, over 5232.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7064, over 5784.55 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (0/8) Epoch 24, batch 2500, train_loss[loss=2.93, NarTop10Accuracy=0.7388, over 5136.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5476.33 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:16,751 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 19:52:16,754 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-24.pt +2024-08-06 19:53:22,198 INFO [trainer.py:765] (0/8) Epoch 25, batch 100, train_loss[loss=3.356, NarTop10Accuracy=0.6517, over 7527.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.71, over 2358.94 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,263 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (0/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,330 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,177 INFO [trainer.py:765] (0/8) Epoch 25, batch 200, train_loss[loss=2.885, NarTop10Accuracy=0.7498, over 6792.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7081, over 3844.08 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,648 INFO [trainer.py:765] (0/8) Epoch 25, batch 300, train_loss[loss=3.178, NarTop10Accuracy=0.6943, over 7080.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7093, over 4663.88 frames. ], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (0/8) Epoch 25, batch 400, train_loss[loss=3.068, NarTop10Accuracy=0.7084, over 5220.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7083, over 5103.21 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,739 INFO [trainer.py:765] (0/8) Epoch 25, batch 500, train_loss[loss=2.868, NarTop10Accuracy=0.7602, over 6114.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 5363.83 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,815 INFO [trainer.py:765] (0/8) Epoch 25, batch 600, train_loss[loss=2.734, NarTop10Accuracy=0.7768, over 5670.00 frames. 
], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 5652.15 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,497 INFO [trainer.py:765] (0/8) Epoch 25, batch 700, train_loss[loss=2.625, NarTop10Accuracy=0.7956, over 4926.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.71, over 5707.99 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,137 INFO [trainer.py:765] (0/8) Epoch 25, batch 800, train_loss[loss=2.9, NarTop10Accuracy=0.7408, over 4293.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5762.51 frames. ], batch size: 5, lr: 3.43e-03 +2024-08-06 19:58:00,679 INFO [trainer.py:765] (0/8) Epoch 25, batch 900, train_loss[loss=3.145, NarTop10Accuracy=0.6957, over 6321.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7095, over 5787.48 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:58:37,640 INFO [trainer.py:765] (0/8) Epoch 25, batch 1000, train_loss[loss=2.849, NarTop10Accuracy=0.7637, over 6396.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7069, over 5884.36 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:59:14,856 INFO [trainer.py:765] (0/8) Epoch 25, batch 1100, train_loss[loss=3.329, NarTop10Accuracy=0.6469, over 6840.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 5929.45 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,040 INFO [trainer.py:765] (0/8) Epoch 25, batch 1200, train_loss[loss=3.389, NarTop10Accuracy=0.6459, over 7326.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7064, over 5936.07 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,599 INFO [trainer.py:765] (0/8) Epoch 25, batch 1300, train_loss[loss=3.032, NarTop10Accuracy=0.724, over 5049.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 5993.29 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,016 INFO [trainer.py:765] (0/8) Epoch 25, batch 1400, train_loss[loss=2.936, NarTop10Accuracy=0.744, over 5979.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7084, over 6018.26 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (0/8) Epoch 25, batch 1500, train_loss[loss=3.187, NarTop10Accuracy=0.6888, over 6459.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7074, over 5954.16 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,625 INFO [trainer.py:765] (0/8) Epoch 25, batch 1600, train_loss[loss=2.931, NarTop10Accuracy=0.7378, over 7110.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7091, over 5944.66 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,360 INFO [trainer.py:765] (0/8) Epoch 25, batch 1700, train_loss[loss=2.92, NarTop10Accuracy=0.7405, over 6240.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7094, over 5941.12 frames. ], batch size: 13, lr: 3.41e-03 +2024-08-06 20:02:53,854 INFO [trainer.py:765] (0/8) Epoch 25, batch 1800, train_loss[loss=3.333, NarTop10Accuracy=0.6622, over 7188.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7069, over 5988.11 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,341 INFO [trainer.py:765] (0/8) Epoch 25, batch 1900, train_loss[loss=3.264, NarTop10Accuracy=0.6804, over 6246.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7049, over 6034.88 frames. ], batch size: 51, lr: 3.40e-03 +2024-08-06 20:03:45,934 INFO [trainer.py:765] (0/8) Epoch 25, batch 2000, train_loss[loss=3.515, NarTop10Accuracy=0.6243, over 6087.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7016, over 5992.65 frames. 
], batch size: 50, lr: 3.40e-03 +2024-08-06 20:04:11,246 INFO [trainer.py:765] (0/8) Epoch 25, batch 2100, train_loss[loss=2.727, NarTop10Accuracy=0.771, over 4005.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7046, over 5976.45 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,410 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (0/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,513 INFO [trainer.py:765] (0/8) Epoch 25, batch 2200, train_loss[loss=3.188, NarTop10Accuracy=0.687, over 7182.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7028, over 6020.18 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (0/8) Epoch 25, batch 2300, train_loss[loss=2.921, NarTop10Accuracy=0.7395, over 5835.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.702, over 6030.89 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (0/8) Epoch 25, batch 2400, train_loss[loss=2.808, NarTop10Accuracy=0.7686, over 5226.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7062, over 5796.32 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,846 INFO [trainer.py:765] (0/8) Epoch 25, batch 2500, train_loss[loss=2.764, NarTop10Accuracy=0.7656, over 5013.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7125, over 5507.11 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:17,995 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 20:06:17,997 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-25.pt +2024-08-06 20:07:19,305 INFO [trainer.py:765] (0/8) Epoch 26, batch 100, train_loss[loss=3.051, NarTop10Accuracy=0.712, over 7386.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7092, over 2346.51 frames. ], batch size: 32, lr: 3.32e-03 +2024-08-06 20:07:52,383 INFO [trainer.py:765] (0/8) Epoch 26, batch 200, train_loss[loss=2.878, NarTop10Accuracy=0.7567, over 6597.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 3849.10 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,734 INFO [trainer.py:765] (0/8) Epoch 26, batch 300, train_loss[loss=2.927, NarTop10Accuracy=0.7405, over 7068.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7088, over 4644.56 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,185 INFO [trainer.py:765] (0/8) Epoch 26, batch 400, train_loss[loss=2.895, NarTop10Accuracy=0.7408, over 5145.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7091, over 5090.50 frames. ], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,148 INFO [trainer.py:765] (0/8) Epoch 26, batch 500, train_loss[loss=2.908, NarTop10Accuracy=0.7452, over 6084.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7079, over 5382.31 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,891 INFO [trainer.py:765] (0/8) Epoch 26, batch 600, train_loss[loss=2.67, NarTop10Accuracy=0.7919, over 5622.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7125, over 5666.54 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,873 INFO [trainer.py:765] (0/8) Epoch 26, batch 700, train_loss[loss=3.254, NarTop10Accuracy=0.6656, over 4956.00 frames. 
], tot_loss[loss=3.089, NarTop10Accuracy=0.7081, over 5719.12 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:19,061 INFO [trainer.py:765] (0/8) Epoch 26, batch 800, train_loss[loss=2.916, NarTop10Accuracy=0.7403, over 5151.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7087, over 5775.64 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,316 INFO [trainer.py:765] (0/8) Epoch 26, batch 900, train_loss[loss=2.858, NarTop10Accuracy=0.7559, over 6219.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7088, over 5796.02 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:12:25,974 INFO [trainer.py:765] (0/8) Epoch 26, batch 1000, train_loss[loss=2.843, NarTop10Accuracy=0.7492, over 6114.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 5887.13 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:13:06,377 INFO [trainer.py:765] (0/8) Epoch 26, batch 1100, train_loss[loss=3.213, NarTop10Accuracy=0.6776, over 6795.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7059, over 5932.37 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,536 INFO [trainer.py:765] (0/8) Epoch 26, batch 1200, train_loss[loss=3.389, NarTop10Accuracy=0.6397, over 6942.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 5913.20 frames. ], batch size: 31, lr: 3.29e-03 +2024-08-06 20:14:13,696 INFO [trainer.py:765] (0/8) Epoch 26, batch 1300, train_loss[loss=2.818, NarTop10Accuracy=0.7712, over 5070.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 5992.31 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (0/8) Epoch 26, batch 1400, train_loss[loss=2.849, NarTop10Accuracy=0.7567, over 6018.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7088, over 5999.88 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,156 INFO [trainer.py:765] (0/8) Epoch 26, batch 1500, train_loss[loss=3.165, NarTop10Accuracy=0.6924, over 6462.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7087, over 5957.13 frames. ], batch size: 50, lr: 3.28e-03 +2024-08-06 20:15:48,980 INFO [trainer.py:765] (0/8) Epoch 26, batch 1600, train_loss[loss=2.88, NarTop10Accuracy=0.7425, over 7185.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5947.54 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,002 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (0/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 20:15:58,239 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,953 INFO [trainer.py:765] (0/8) Epoch 26, batch 1700, train_loss[loss=3.269, NarTop10Accuracy=0.6728, over 6201.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7112, over 5905.74 frames. ], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,427 INFO [trainer.py:765] (0/8) Epoch 26, batch 1800, train_loss[loss=2.815, NarTop10Accuracy=0.7671, over 7095.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7111, over 5982.70 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,841 INFO [trainer.py:765] (0/8) Epoch 26, batch 1900, train_loss[loss=3.083, NarTop10Accuracy=0.7169, over 5655.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 6007.31 frames. 
], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,380 INFO [trainer.py:765] (0/8) Epoch 26, batch 2000, train_loss[loss=3.593, NarTop10Accuracy=0.6028, over 6330.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7078, over 5983.26 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,564 INFO [trainer.py:765] (0/8) Epoch 26, batch 2100, train_loss[loss=2.971, NarTop10Accuracy=0.7258, over 4929.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7059, over 5958.33 frames. ], batch size: 5, lr: 3.27e-03 +2024-08-06 20:18:32,778 INFO [trainer.py:765] (0/8) Epoch 26, batch 2200, train_loss[loss=2.917, NarTop10Accuracy=0.7514, over 7293.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7071, over 6015.88 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,898 INFO [trainer.py:765] (0/8) Epoch 26, batch 2300, train_loss[loss=3.095, NarTop10Accuracy=0.707, over 5724.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7061, over 6037.89 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,206 INFO [trainer.py:765] (0/8) Epoch 26, batch 2400, train_loss[loss=2.854, NarTop10Accuracy=0.7535, over 5076.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7102, over 5795.61 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,652 INFO [trainer.py:765] (0/8) Epoch 26, batch 2500, train_loss[loss=2.817, NarTop10Accuracy=0.7665, over 5238.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5483.06 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:05,616 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 20:20:05,618 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-26.pt +2024-08-06 20:21:04,874 INFO [trainer.py:765] (0/8) Epoch 27, batch 100, train_loss[loss=3.253, NarTop10Accuracy=0.6781, over 7272.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7101, over 2363.40 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (0/8) Epoch 27, batch 200, train_loss[loss=2.862, NarTop10Accuracy=0.7523, over 6744.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 3848.36 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,049 INFO [trainer.py:765] (0/8) Epoch 27, batch 300, train_loss[loss=2.928, NarTop10Accuracy=0.7475, over 7116.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7095, over 4648.48 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (0/8) Epoch 27, batch 400, train_loss[loss=2.987, NarTop10Accuracy=0.7405, over 5172.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7126, over 5107.51 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (0/8) Epoch 27, batch 500, train_loss[loss=2.701, NarTop10Accuracy=0.8009, over 6087.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7146, over 5376.10 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,436 INFO [trainer.py:765] (0/8) Epoch 27, batch 600, train_loss[loss=3.115, NarTop10Accuracy=0.6975, over 5676.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7151, over 5628.18 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,976 INFO [trainer.py:765] (0/8) Epoch 27, batch 700, train_loss[loss=2.879, NarTop10Accuracy=0.758, over 4278.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7162, over 5698.84 frames. ], batch size: 5, lr: 3.18e-03 +2024-08-06 20:25:03,408 INFO [trainer.py:765] (0/8) Epoch 27, batch 800, train_loss[loss=3.122, NarTop10Accuracy=0.6966, over 4992.00 frames. 
], tot_loss[loss=3.072, NarTop10Accuracy=0.7119, over 5773.65 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (0/8) Epoch 27, batch 900, train_loss[loss=3.28, NarTop10Accuracy=0.6675, over 6219.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7111, over 5789.23 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (0/8) Epoch 27, batch 1000, train_loss[loss=2.91, NarTop10Accuracy=0.7549, over 6147.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7101, over 5892.94 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,316 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (0/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (0/8) Epoch 27, batch 1100, train_loss[loss=3.003, NarTop10Accuracy=0.7228, over 6789.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 5933.36 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (0/8) Epoch 27, batch 1200, train_loss[loss=2.878, NarTop10Accuracy=0.7453, over 7182.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 5926.45 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (0/8) Epoch 27, batch 1300, train_loss[loss=2.747, NarTop10Accuracy=0.7731, over 5025.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 5984.52 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (0/8) Epoch 27, batch 1400, train_loss[loss=3.538, NarTop10Accuracy=0.6219, over 6114.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7075, over 6003.97 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (0/8) Epoch 27, batch 1500, train_loss[loss=3.078, NarTop10Accuracy=0.7123, over 6093.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7085, over 5937.07 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (0/8) Epoch 27, batch 1600, train_loss[loss=2.904, NarTop10Accuracy=0.7492, over 7149.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7068, over 5934.06 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,978 INFO [trainer.py:765] (0/8) Epoch 27, batch 1700, train_loss[loss=3.153, NarTop10Accuracy=0.6926, over 6237.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 5915.57 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (0/8) Epoch 27, batch 1800, train_loss[loss=3.422, NarTop10Accuracy=0.6414, over 7146.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7089, over 5983.19 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (0/8) Epoch 27, batch 1900, train_loss[loss=3.123, NarTop10Accuracy=0.7077, over 6348.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 6029.45 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (0/8) Epoch 27, batch 2000, train_loss[loss=3.133, NarTop10Accuracy=0.7067, over 5910.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 6017.61 frames. 
], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,660 INFO [trainer.py:765] (0/8) Epoch 27, batch 2100, train_loss[loss=2.892, NarTop10Accuracy=0.7511, over 3957.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7109, over 5977.21 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (0/8) Epoch 27, batch 2200, train_loss[loss=3.478, NarTop10Accuracy=0.628, over 7176.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7084, over 6025.53 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 20:32:32,942 INFO [trainer.py:765] (0/8) Epoch 27, batch 2300, train_loss[loss=2.769, NarTop10Accuracy=0.77, over 5769.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7082, over 6044.97 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (0/8) Epoch 27, batch 2400, train_loss[loss=2.7, NarTop10Accuracy=0.789, over 5130.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7082, over 5785.10 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (0/8) Epoch 27, batch 2500, train_loss[loss=3.357, NarTop10Accuracy=0.6494, over 5211.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7136, over 5489.39 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,481 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 20:33:40,486 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-27.pt +2024-08-06 20:34:35,829 INFO [trainer.py:765] (0/8) Epoch 28, batch 100, train_loss[loss=2.87, NarTop10Accuracy=0.7505, over 7086.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7073, over 2388.53 frames. ], batch size: 32, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (0/8) Epoch 28, batch 200, train_loss[loss=2.671, NarTop10Accuracy=0.7829, over 6801.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7057, over 3859.63 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,422 INFO [trainer.py:765] (0/8) Epoch 28, batch 300, train_loss[loss=3.066, NarTop10Accuracy=0.724, over 7200.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7091, over 4649.48 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,865 INFO [trainer.py:765] (0/8) Epoch 28, batch 400, train_loss[loss=3.31, NarTop10Accuracy=0.6536, over 5085.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 5107.47 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,407 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (0/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. +2024-08-06 20:36:40,531 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,662 INFO [trainer.py:765] (0/8) Epoch 28, batch 500, train_loss[loss=3.357, NarTop10Accuracy=0.6565, over 6168.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7109, over 5377.54 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,461 INFO [trainer.py:765] (0/8) Epoch 28, batch 600, train_loss[loss=3, NarTop10Accuracy=0.7267, over 5655.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 5641.28 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,890 INFO [trainer.py:765] (0/8) Epoch 28, batch 700, train_loss[loss=3.183, NarTop10Accuracy=0.6784, over 5007.00 frames. 
], tot_loss[loss=3.084, NarTop10Accuracy=0.7084, over 5714.36 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,488 INFO [trainer.py:765] (0/8) Epoch 28, batch 800, train_loss[loss=2.771, NarTop10Accuracy=0.7751, over 4989.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5781.38 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,505 INFO [trainer.py:765] (0/8) Epoch 28, batch 900, train_loss[loss=3.329, NarTop10Accuracy=0.6571, over 6657.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 5802.27 frames. ], batch size: 14, lr: 3.06e-03 +2024-08-06 20:39:53,239 INFO [trainer.py:765] (0/8) Epoch 28, batch 1000, train_loss[loss=3.284, NarTop10Accuracy=0.6726, over 6660.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5900.27 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 20:40:25,866 INFO [trainer.py:765] (0/8) Epoch 28, batch 1100, train_loss[loss=2.788, NarTop10Accuracy=0.7701, over 6744.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7107, over 5942.01 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,417 INFO [trainer.py:765] (0/8) Epoch 28, batch 1200, train_loss[loss=3.293, NarTop10Accuracy=0.6604, over 7401.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7098, over 5928.91 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,680 INFO [trainer.py:765] (0/8) Epoch 28, batch 1300, train_loss[loss=3.304, NarTop10Accuracy=0.675, over 4374.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5980.61 frames. ], batch size: 5, lr: 3.05e-03 +2024-08-06 20:42:13,046 INFO [trainer.py:765] (0/8) Epoch 28, batch 1400, train_loss[loss=2.94, NarTop10Accuracy=0.7302, over 6069.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 6014.47 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,170 INFO [trainer.py:765] (0/8) Epoch 28, batch 1500, train_loss[loss=3.446, NarTop10Accuracy=0.6362, over 6213.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7113, over 5971.79 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,079 INFO [trainer.py:765] (0/8) Epoch 28, batch 1600, train_loss[loss=2.887, NarTop10Accuracy=0.748, over 7182.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7111, over 5935.61 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:43:37,784 INFO [trainer.py:765] (0/8) Epoch 28, batch 1700, train_loss[loss=3.098, NarTop10Accuracy=0.705, over 6624.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5923.70 frames. ], batch size: 14, lr: 3.04e-03 +2024-08-06 20:44:04,325 INFO [trainer.py:765] (0/8) Epoch 28, batch 1800, train_loss[loss=3.081, NarTop10Accuracy=0.7053, over 6942.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5983.06 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,756 INFO [trainer.py:765] (0/8) Epoch 28, batch 1900, train_loss[loss=3.076, NarTop10Accuracy=0.7136, over 5343.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7111, over 6019.24 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,327 INFO [trainer.py:765] (0/8) Epoch 28, batch 2000, train_loss[loss=2.978, NarTop10Accuracy=0.7281, over 6255.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7141, over 6008.22 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 20:45:21,649 INFO [trainer.py:765] (0/8) Epoch 28, batch 2100, train_loss[loss=2.852, NarTop10Accuracy=0.7513, over 4824.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7149, over 5972.98 frames. 
], batch size: 5, lr: 3.03e-03 +2024-08-06 20:45:47,075 INFO [trainer.py:765] (0/8) Epoch 28, batch 2200, train_loss[loss=2.973, NarTop10Accuracy=0.7384, over 7287.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7125, over 6010.74 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,306 INFO [trainer.py:765] (0/8) Epoch 28, batch 2300, train_loss[loss=3.345, NarTop10Accuracy=0.6458, over 5712.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7087, over 6022.40 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,806 INFO [trainer.py:765] (0/8) Epoch 28, batch 2400, train_loss[loss=3, NarTop10Accuracy=0.7266, over 5178.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7071, over 5777.48 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,594 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (0/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (0/8) Epoch 28, batch 2500, train_loss[loss=2.923, NarTop10Accuracy=0.7435, over 5088.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7121, over 5482.16 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:27,712 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 20:47:27,715 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-28.pt +2024-08-06 20:48:21,052 INFO [trainer.py:765] (0/8) Epoch 29, batch 100, train_loss[loss=3.038, NarTop10Accuracy=0.7229, over 7365.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7115, over 2377.68 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,405 INFO [trainer.py:765] (0/8) Epoch 29, batch 200, train_loss[loss=3.247, NarTop10Accuracy=0.6812, over 6828.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7171, over 3867.50 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,477 INFO [trainer.py:765] (0/8) Epoch 29, batch 300, train_loss[loss=3.21, NarTop10Accuracy=0.6888, over 7422.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7168, over 4677.17 frames. ], batch size: 23, lr: 2.96e-03 +2024-08-06 20:49:56,052 INFO [trainer.py:765] (0/8) Epoch 29, batch 400, train_loss[loss=3.393, NarTop10Accuracy=0.6299, over 5223.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7114, over 5119.56 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,435 INFO [trainer.py:765] (0/8) Epoch 29, batch 500, train_loss[loss=3.116, NarTop10Accuracy=0.6999, over 6114.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5403.51 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,024 INFO [trainer.py:765] (0/8) Epoch 29, batch 600, train_loss[loss=2.741, NarTop10Accuracy=0.7829, over 5580.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.715, over 5655.55 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,677 INFO [trainer.py:765] (0/8) Epoch 29, batch 700, train_loss[loss=2.776, NarTop10Accuracy=0.7716, over 5097.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7107, over 5722.72 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,724 INFO [trainer.py:765] (0/8) Epoch 29, batch 800, train_loss[loss=2.757, NarTop10Accuracy=0.7775, over 5133.00 frames. 
], tot_loss[loss=3.065, NarTop10Accuracy=0.7126, over 5763.90 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,742 INFO [trainer.py:765] (0/8) Epoch 29, batch 900, train_loss[loss=2.737, NarTop10Accuracy=0.776, over 6135.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7087, over 5790.31 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:16,861 INFO [trainer.py:765] (0/8) Epoch 29, batch 1000, train_loss[loss=3.499, NarTop10Accuracy=0.6218, over 6657.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7086, over 5897.68 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:52,902 INFO [trainer.py:765] (0/8) Epoch 29, batch 1100, train_loss[loss=3.186, NarTop10Accuracy=0.6957, over 6756.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7079, over 5922.52 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,690 INFO [trainer.py:765] (0/8) Epoch 29, batch 1200, train_loss[loss=3.147, NarTop10Accuracy=0.697, over 7236.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7098, over 5932.17 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,428 INFO [trainer.py:765] (0/8) Epoch 29, batch 1300, train_loss[loss=2.999, NarTop10Accuracy=0.7396, over 4212.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 5980.96 frames. ], batch size: 5, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (0/8) Epoch 29, batch 1400, train_loss[loss=3.427, NarTop10Accuracy=0.6432, over 6051.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7107, over 6014.35 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,359 INFO [trainer.py:765] (0/8) Epoch 29, batch 1500, train_loss[loss=3.306, NarTop10Accuracy=0.6611, over 5826.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7116, over 5948.53 frames. ], batch size: 51, lr: 2.94e-03 +2024-08-06 20:56:32,040 INFO [trainer.py:765] (0/8) Epoch 29, batch 1600, train_loss[loss=3.274, NarTop10Accuracy=0.6752, over 7161.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 5932.52 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,640 INFO [trainer.py:765] (0/8) Epoch 29, batch 1700, train_loss[loss=2.846, NarTop10Accuracy=0.7611, over 6396.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7102, over 5923.87 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 20:57:25,000 INFO [trainer.py:765] (0/8) Epoch 29, batch 1800, train_loss[loss=3.064, NarTop10Accuracy=0.7095, over 6975.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7109, over 5970.48 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,621 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (0/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (0/8) Epoch 29, batch 1900, train_loss[loss=3.127, NarTop10Accuracy=0.7114, over 6084.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7076, over 6024.48 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 20:58:25,308 INFO [trainer.py:765] (0/8) Epoch 29, batch 2000, train_loss[loss=3.528, NarTop10Accuracy=0.6206, over 6486.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7082, over 5994.52 frames. 
], batch size: 53, lr: 2.93e-03 +2024-08-06 20:58:50,630 INFO [trainer.py:765] (0/8) Epoch 29, batch 2100, train_loss[loss=2.854, NarTop10Accuracy=0.7477, over 4803.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7084, over 5975.90 frames. ], batch size: 5, lr: 2.92e-03 +2024-08-06 20:59:15,805 INFO [trainer.py:765] (0/8) Epoch 29, batch 2200, train_loss[loss=2.847, NarTop10Accuracy=0.7553, over 7242.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 6012.33 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (0/8) Epoch 29, batch 2300, train_loss[loss=2.938, NarTop10Accuracy=0.7374, over 5631.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 6029.58 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,155 INFO [trainer.py:765] (0/8) Epoch 29, batch 2400, train_loss[loss=2.809, NarTop10Accuracy=0.7671, over 5091.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7105, over 5788.88 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (0/8) Epoch 29, batch 2500, train_loss[loss=3.374, NarTop10Accuracy=0.6547, over 5100.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7155, over 5481.23 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,734 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 21:00:48,737 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-29.pt +2024-08-06 21:01:41,716 INFO [trainer.py:765] (0/8) Epoch 30, batch 100, train_loss[loss=2.9, NarTop10Accuracy=0.7499, over 7212.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7206, over 2360.50 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,013 INFO [trainer.py:765] (0/8) Epoch 30, batch 200, train_loss[loss=2.827, NarTop10Accuracy=0.7563, over 6930.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7225, over 3859.46 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,343 INFO [trainer.py:765] (0/8) Epoch 30, batch 300, train_loss[loss=2.864, NarTop10Accuracy=0.7525, over 7107.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7235, over 4650.21 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,643 INFO [trainer.py:765] (0/8) Epoch 30, batch 400, train_loss[loss=2.801, NarTop10Accuracy=0.7688, over 5073.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7206, over 5116.43 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,546 INFO [trainer.py:765] (0/8) Epoch 30, batch 500, train_loss[loss=3.283, NarTop10Accuracy=0.6568, over 6123.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5418.98 frames. ], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,656 INFO [trainer.py:765] (0/8) Epoch 30, batch 600, train_loss[loss=2.924, NarTop10Accuracy=0.748, over 5739.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.717, over 5667.28 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,525 INFO [trainer.py:765] (0/8) Epoch 30, batch 700, train_loss[loss=2.872, NarTop10Accuracy=0.7449, over 5070.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7194, over 5738.20 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,132 INFO [trainer.py:765] (0/8) Epoch 30, batch 800, train_loss[loss=2.933, NarTop10Accuracy=0.7227, over 5082.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.719, over 5787.05 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,843 INFO [trainer.py:765] (0/8) Epoch 30, batch 900, train_loss[loss=2.915, NarTop10Accuracy=0.7466, over 6177.00 frames. 
], tot_loss[loss=3.036, NarTop10Accuracy=0.7185, over 5787.67 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (0/8) Epoch 30, batch 1000, train_loss[loss=2.898, NarTop10Accuracy=0.7417, over 6609.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7122, over 5888.62 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:07:25,936 INFO [trainer.py:765] (0/8) Epoch 30, batch 1100, train_loss[loss=3.368, NarTop10Accuracy=0.649, over 6813.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5920.10 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,380 INFO [trainer.py:765] (0/8) Epoch 30, batch 1200, train_loss[loss=3.004, NarTop10Accuracy=0.7333, over 7245.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5918.24 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,371 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (0/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (0/8) Epoch 30, batch 1300, train_loss[loss=3.01, NarTop10Accuracy=0.7266, over 4278.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 5995.53 frames. ], batch size: 5, lr: 2.84e-03 +2024-08-06 21:09:22,396 INFO [trainer.py:765] (0/8) Epoch 30, batch 1400, train_loss[loss=2.805, NarTop10Accuracy=0.7688, over 6546.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7107, over 6026.67 frames. ], batch size: 12, lr: 2.84e-03 +2024-08-06 21:09:52,372 INFO [trainer.py:765] (0/8) Epoch 30, batch 1500, train_loss[loss=3.068, NarTop10Accuracy=0.7152, over 6066.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7133, over 5952.64 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (0/8) Epoch 30, batch 1600, train_loss[loss=2.964, NarTop10Accuracy=0.7287, over 7209.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7121, over 5941.02 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,679 INFO [trainer.py:765] (0/8) Epoch 30, batch 1700, train_loss[loss=3.107, NarTop10Accuracy=0.6961, over 6453.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7108, over 5915.59 frames. ], batch size: 14, lr: 2.83e-03 +2024-08-06 21:11:13,058 INFO [trainer.py:765] (0/8) Epoch 30, batch 1800, train_loss[loss=3.384, NarTop10Accuracy=0.6375, over 6915.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7112, over 5998.78 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,418 INFO [trainer.py:765] (0/8) Epoch 30, batch 1900, train_loss[loss=3.095, NarTop10Accuracy=0.7077, over 6132.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7101, over 6032.07 frames. ], batch size: 51, lr: 2.83e-03 +2024-08-06 21:12:04,825 INFO [trainer.py:765] (0/8) Epoch 30, batch 2000, train_loss[loss=3.34, NarTop10Accuracy=0.663, over 6063.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7127, over 6007.14 frames. ], batch size: 51, lr: 2.83e-03 +2024-08-06 21:12:30,088 INFO [trainer.py:765] (0/8) Epoch 30, batch 2100, train_loss[loss=2.837, NarTop10Accuracy=0.754, over 4731.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7111, over 5997.16 frames. 
], batch size: 5, lr: 2.83e-03 +2024-08-06 21:12:55,224 INFO [trainer.py:765] (0/8) Epoch 30, batch 2200, train_loss[loss=2.985, NarTop10Accuracy=0.7372, over 7170.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7114, over 6033.40 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,297 INFO [trainer.py:765] (0/8) Epoch 30, batch 2300, train_loss[loss=2.696, NarTop10Accuracy=0.7858, over 5802.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7073, over 6030.54 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,491 INFO [trainer.py:765] (0/8) Epoch 30, batch 2400, train_loss[loss=2.842, NarTop10Accuracy=0.7592, over 5217.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7156, over 5781.48 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,987 INFO [trainer.py:765] (0/8) Epoch 30, batch 2500, train_loss[loss=3.021, NarTop10Accuracy=0.7288, over 5085.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7165, over 5488.98 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:28,045 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 21:14:28,047 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-30.pt +2024-08-06 21:15:23,633 INFO [trainer.py:765] (0/8) Epoch 31, batch 100, train_loss[loss=3.437, NarTop10Accuracy=0.6418, over 6909.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7127, over 2354.80 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,128 INFO [trainer.py:765] (0/8) Epoch 31, batch 200, train_loss[loss=2.907, NarTop10Accuracy=0.7337, over 6774.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 3854.05 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,216 INFO [trainer.py:765] (0/8) Epoch 31, batch 300, train_loss[loss=2.881, NarTop10Accuracy=0.7532, over 7224.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 4661.08 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (0/8) Epoch 31, batch 400, train_loss[loss=3.279, NarTop10Accuracy=0.6737, over 4995.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 5102.88 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,725 INFO [trainer.py:765] (0/8) Epoch 31, batch 500, train_loss[loss=2.71, NarTop10Accuracy=0.7874, over 6144.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 5393.41 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (0/8) Epoch 31, batch 600, train_loss[loss=2.804, NarTop10Accuracy=0.7651, over 5628.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5659.82 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,611 INFO [trainer.py:765] (0/8) Epoch 31, batch 700, train_loss[loss=3.46, NarTop10Accuracy=0.6403, over 5184.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7137, over 5749.92 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,096 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (0/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,276 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,245 INFO [trainer.py:765] (0/8) Epoch 31, batch 800, train_loss[loss=2.86, NarTop10Accuracy=0.7536, over 4368.00 frames. 
], tot_loss[loss=3.052, NarTop10Accuracy=0.7154, over 5785.74 frames. ], batch size: 5, lr: 2.76e-03 +2024-08-06 21:19:56,950 INFO [trainer.py:765] (0/8) Epoch 31, batch 900, train_loss[loss=3.313, NarTop10Accuracy=0.6632, over 6321.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5796.31 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,310 INFO [trainer.py:765] (0/8) Epoch 31, batch 1000, train_loss[loss=3.338, NarTop10Accuracy=0.6507, over 6177.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7168, over 5896.37 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,215 INFO [trainer.py:765] (0/8) Epoch 31, batch 1100, train_loss[loss=3.201, NarTop10Accuracy=0.678, over 6825.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7152, over 5936.56 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,119 INFO [trainer.py:765] (0/8) Epoch 31, batch 1200, train_loss[loss=2.915, NarTop10Accuracy=0.7349, over 7371.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7189, over 5942.26 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,741 INFO [trainer.py:765] (0/8) Epoch 31, batch 1300, train_loss[loss=2.824, NarTop10Accuracy=0.7607, over 4188.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.714, over 5993.03 frames. ], batch size: 5, lr: 2.75e-03 +2024-08-06 21:22:53,533 INFO [trainer.py:765] (0/8) Epoch 31, batch 1400, train_loss[loss=2.885, NarTop10Accuracy=0.7513, over 6066.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7124, over 6032.99 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (0/8) Epoch 31, batch 1500, train_loss[loss=3.38, NarTop10Accuracy=0.6477, over 5973.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5954.59 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:23:49,005 INFO [trainer.py:765] (0/8) Epoch 31, batch 1600, train_loss[loss=3.461, NarTop10Accuracy=0.6352, over 7293.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5928.33 frames. ], batch size: 23, lr: 2.74e-03 +2024-08-06 21:24:15,511 INFO [trainer.py:765] (0/8) Epoch 31, batch 1700, train_loss[loss=3.442, NarTop10Accuracy=0.6369, over 6585.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7149, over 5913.04 frames. ], batch size: 14, lr: 2.74e-03 +2024-08-06 21:24:41,995 INFO [trainer.py:765] (0/8) Epoch 31, batch 1800, train_loss[loss=2.759, NarTop10Accuracy=0.7716, over 7122.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 5979.74 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,356 INFO [trainer.py:765] (0/8) Epoch 31, batch 1900, train_loss[loss=3.144, NarTop10Accuracy=0.7, over 5934.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 6016.55 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:33,774 INFO [trainer.py:765] (0/8) Epoch 31, batch 2000, train_loss[loss=2.98, NarTop10Accuracy=0.731, over 6165.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7151, over 5983.87 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:25:59,106 INFO [trainer.py:765] (0/8) Epoch 31, batch 2100, train_loss[loss=2.718, NarTop10Accuracy=0.7824, over 3873.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5974.24 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 21:26:24,237 INFO [trainer.py:765] (0/8) Epoch 31, batch 2200, train_loss[loss=3.107, NarTop10Accuracy=0.7072, over 7278.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 6001.15 frames. 
], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (0/8) Epoch 31, batch 2300, train_loss[loss=2.688, NarTop10Accuracy=0.7883, over 5691.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7146, over 6019.91 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,607 INFO [trainer.py:765] (0/8) Epoch 31, batch 2400, train_loss[loss=2.856, NarTop10Accuracy=0.76, over 5091.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5758.17 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (0/8) Epoch 31, batch 2500, train_loss[loss=2.83, NarTop10Accuracy=0.7584, over 5076.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 5444.66 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:57,087 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 21:27:57,090 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-31.pt +2024-08-06 21:28:49,393 INFO [trainer.py:765] (0/8) Epoch 32, batch 100, train_loss[loss=2.971, NarTop10Accuracy=0.7359, over 7092.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 2365.03 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,161 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (0/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 21:29:16,940 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,273 INFO [trainer.py:765] (0/8) Epoch 32, batch 200, train_loss[loss=3.325, NarTop10Accuracy=0.6535, over 6741.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.712, over 3860.62 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,279 INFO [trainer.py:765] (0/8) Epoch 32, batch 300, train_loss[loss=2.958, NarTop10Accuracy=0.7345, over 7152.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7154, over 4650.27 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (0/8) Epoch 32, batch 400, train_loss[loss=2.838, NarTop10Accuracy=0.7609, over 5322.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.713, over 5113.61 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,531 INFO [trainer.py:765] (0/8) Epoch 32, batch 500, train_loss[loss=2.891, NarTop10Accuracy=0.7438, over 6138.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7134, over 5388.26 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,487 INFO [trainer.py:765] (0/8) Epoch 32, batch 600, train_loss[loss=3.371, NarTop10Accuracy=0.645, over 5739.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7142, over 5653.28 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (0/8) Epoch 32, batch 700, train_loss[loss=2.891, NarTop10Accuracy=0.7469, over 5163.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7173, over 5726.95 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,647 INFO [trainer.py:765] (0/8) Epoch 32, batch 800, train_loss[loss=3.289, NarTop10Accuracy=0.6618, over 5235.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7171, over 5787.72 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:28,992 INFO [trainer.py:765] (0/8) Epoch 32, batch 900, train_loss[loss=2.761, NarTop10Accuracy=0.7786, over 6270.00 frames. 
], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 5806.27 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,050 INFO [trainer.py:765] (0/8) Epoch 32, batch 1000, train_loss[loss=3.301, NarTop10Accuracy=0.6682, over 6150.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5902.23 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:46,675 INFO [trainer.py:765] (0/8) Epoch 32, batch 1100, train_loss[loss=3.129, NarTop10Accuracy=0.6979, over 6732.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5935.58 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,172 INFO [trainer.py:765] (0/8) Epoch 32, batch 1200, train_loss[loss=3.164, NarTop10Accuracy=0.6852, over 7398.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.713, over 5935.57 frames. ], batch size: 32, lr: 2.66e-03 +2024-08-06 21:35:52,802 INFO [trainer.py:765] (0/8) Epoch 32, batch 1300, train_loss[loss=2.918, NarTop10Accuracy=0.7335, over 5166.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7132, over 6004.80 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,479 INFO [trainer.py:765] (0/8) Epoch 32, batch 1400, train_loss[loss=3.188, NarTop10Accuracy=0.6841, over 6132.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7141, over 5995.60 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,734 INFO [trainer.py:765] (0/8) Epoch 32, batch 1500, train_loss[loss=3.475, NarTop10Accuracy=0.6273, over 6105.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5945.13 frames. ], batch size: 51, lr: 2.66e-03 +2024-08-06 21:37:32,522 INFO [trainer.py:765] (0/8) Epoch 32, batch 1600, train_loss[loss=3.048, NarTop10Accuracy=0.7171, over 7119.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7149, over 5927.45 frames. ], batch size: 22, lr: 2.66e-03 +2024-08-06 21:37:59,161 INFO [trainer.py:765] (0/8) Epoch 32, batch 1700, train_loss[loss=3.132, NarTop10Accuracy=0.6905, over 6657.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7156, over 5920.16 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 21:38:25,703 INFO [trainer.py:765] (0/8) Epoch 32, batch 1800, train_loss[loss=2.926, NarTop10Accuracy=0.7333, over 7137.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7132, over 5971.95 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,170 INFO [trainer.py:765] (0/8) Epoch 32, batch 1900, train_loss[loss=3.056, NarTop10Accuracy=0.7148, over 6102.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 6016.42 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (0/8) Epoch 32, batch 2000, train_loss[loss=3.476, NarTop10Accuracy=0.6313, over 6147.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7137, over 5985.50 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (0/8) Epoch 32, batch 2100, train_loss[loss=2.798, NarTop10Accuracy=0.7709, over 4752.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7144, over 5964.35 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 21:39:54,783 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (0/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. 
+2024-08-06 21:40:02,942 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,628 INFO [trainer.py:765] (0/8) Epoch 32, batch 2200, train_loss[loss=3.078, NarTop10Accuracy=0.7026, over 7434.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7151, over 6007.45 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (0/8) Epoch 32, batch 2300, train_loss[loss=3.229, NarTop10Accuracy=0.6701, over 5820.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 6021.42 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (0/8) Epoch 32, batch 2400, train_loss[loss=3.117, NarTop10Accuracy=0.7051, over 5061.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5772.31 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,537 INFO [trainer.py:765] (0/8) Epoch 32, batch 2500, train_loss[loss=2.84, NarTop10Accuracy=0.7588, over 5298.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7205, over 5462.96 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:50,019 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 21:41:50,022 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-32.pt +2024-08-06 21:42:47,616 INFO [trainer.py:765] (0/8) Epoch 33, batch 100, train_loss[loss=3.137, NarTop10Accuracy=0.7014, over 7383.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7244, over 2357.38 frames. ], batch size: 32, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (0/8) Epoch 33, batch 200, train_loss[loss=2.858, NarTop10Accuracy=0.7561, over 6822.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7204, over 3864.17 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (0/8) Epoch 33, batch 300, train_loss[loss=3.424, NarTop10Accuracy=0.6385, over 7005.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 4661.23 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (0/8) Epoch 33, batch 400, train_loss[loss=2.76, NarTop10Accuracy=0.7671, over 5268.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 5099.87 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (0/8) Epoch 33, batch 500, train_loss[loss=2.708, NarTop10Accuracy=0.7872, over 6069.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7212, over 5380.30 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (0/8) Epoch 33, batch 600, train_loss[loss=3.307, NarTop10Accuracy=0.6578, over 5727.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7142, over 5655.40 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,317 INFO [trainer.py:765] (0/8) Epoch 33, batch 700, train_loss[loss=2.615, NarTop10Accuracy=0.8054, over 5085.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7144, over 5718.21 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,170 INFO [trainer.py:765] (0/8) Epoch 33, batch 800, train_loss[loss=2.747, NarTop10Accuracy=0.7703, over 5007.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.715, over 5776.23 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (0/8) Epoch 33, batch 900, train_loss[loss=3.235, NarTop10Accuracy=0.6805, over 6162.00 frames. 
], tot_loss[loss=3.054, NarTop10Accuracy=0.7145, over 5791.07 frames. ], batch size: 13, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (0/8) Epoch 33, batch 1000, train_loss[loss=3.001, NarTop10Accuracy=0.7309, over 6744.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7141, over 5886.47 frames. ], batch size: 14, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (0/8) Epoch 33, batch 1100, train_loss[loss=2.924, NarTop10Accuracy=0.744, over 6819.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7105, over 5917.13 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,660 INFO [trainer.py:765] (0/8) Epoch 33, batch 1200, train_loss[loss=2.903, NarTop10Accuracy=0.7539, over 7212.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7126, over 5940.79 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,816 INFO [trainer.py:765] (0/8) Epoch 33, batch 1300, train_loss[loss=2.932, NarTop10Accuracy=0.7384, over 4233.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7141, over 6002.97 frames. ], batch size: 5, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (0/8) Epoch 33, batch 1400, train_loss[loss=3.206, NarTop10Accuracy=0.6836, over 6051.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 6036.94 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (0/8) Epoch 33, batch 1500, train_loss[loss=3.104, NarTop10Accuracy=0.7038, over 5937.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7141, over 5968.94 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,608 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 21:51:12,662 INFO [trainer.py:811] (0/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (0/8) Epoch 33, batch 1600, train_loss[loss=3.247, NarTop10Accuracy=0.6744, over 7005.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7172, over 5933.86 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (0/8) Epoch 33, batch 1700, train_loss[loss=2.768, NarTop10Accuracy=0.7714, over 6801.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7149, over 5921.33 frames. ], batch size: 14, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (0/8) Epoch 33, batch 1800, train_loss[loss=2.797, NarTop10Accuracy=0.7661, over 7323.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5982.28 frames. ], batch size: 23, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (0/8) Epoch 33, batch 1900, train_loss[loss=3.413, NarTop10Accuracy=0.6431, over 6222.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.712, over 6029.31 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (0/8) Epoch 33, batch 2000, train_loss[loss=3.496, NarTop10Accuracy=0.6251, over 6339.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.717, over 5987.06 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,659 INFO [trainer.py:765] (0/8) Epoch 33, batch 2100, train_loss[loss=3.242, NarTop10Accuracy=0.67, over 4689.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7166, over 5969.80 frames. 
], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,890 INFO [trainer.py:765] (0/8) Epoch 33, batch 2200, train_loss[loss=3.377, NarTop10Accuracy=0.64, over 7008.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7151, over 6010.10 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (0/8) Epoch 33, batch 2300, train_loss[loss=2.765, NarTop10Accuracy=0.7799, over 5682.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7162, over 6014.22 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,429 INFO [trainer.py:765] (0/8) Epoch 33, batch 2400, train_loss[loss=2.715, NarTop10Accuracy=0.7864, over 5208.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 5768.29 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (0/8) Epoch 33, batch 2500, train_loss[loss=2.533, NarTop10Accuracy=0.8059, over 5685.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7225, over 5471.52 frames. ], batch size: 8, lr: 2.56e-03 +2024-08-06 21:55:29,882 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 21:55:29,886 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-33.pt +2024-08-06 21:56:24,721 INFO [trainer.py:765] (0/8) Epoch 34, batch 100, train_loss[loss=3.423, NarTop10Accuracy=0.6383, over 7041.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 2357.83 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (0/8) Epoch 34, batch 200, train_loss[loss=3.165, NarTop10Accuracy=0.7001, over 6786.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7227, over 3860.53 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (0/8) Epoch 34, batch 300, train_loss[loss=2.881, NarTop10Accuracy=0.7473, over 7254.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 4671.56 frames. ], batch size: 22, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (0/8) Epoch 34, batch 400, train_loss[loss=3.205, NarTop10Accuracy=0.6772, over 5175.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7216, over 5111.58 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,689 INFO [trainer.py:765] (0/8) Epoch 34, batch 500, train_loss[loss=3.257, NarTop10Accuracy=0.6681, over 6114.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7189, over 5402.39 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (0/8) Epoch 34, batch 600, train_loss[loss=2.895, NarTop10Accuracy=0.7587, over 5742.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5660.05 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (0/8) Epoch 34, batch 700, train_loss[loss=3.126, NarTop10Accuracy=0.7114, over 5193.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7177, over 5718.00 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (0/8) Epoch 34, batch 800, train_loss[loss=2.986, NarTop10Accuracy=0.7261, over 5184.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7196, over 5778.64 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (0/8) Epoch 34, batch 900, train_loss[loss=2.84, NarTop10Accuracy=0.7567, over 6195.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7196, over 5800.10 frames. 
], batch size: 13, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (0/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 22:01:34,092 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (0/8) Epoch 34, batch 1000, train_loss[loss=3.429, NarTop10Accuracy=0.6342, over 6207.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7174, over 5902.05 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (0/8) Epoch 34, batch 1100, train_loss[loss=3.173, NarTop10Accuracy=0.6965, over 6642.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 5925.36 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,787 INFO [trainer.py:765] (0/8) Epoch 34, batch 1200, train_loss[loss=2.915, NarTop10Accuracy=0.7436, over 7173.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7168, over 5915.35 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,814 INFO [trainer.py:765] (0/8) Epoch 34, batch 1300, train_loss[loss=2.903, NarTop10Accuracy=0.7466, over 4452.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5977.56 frames. ], batch size: 5, lr: 2.50e-03 +2024-08-06 22:03:52,950 INFO [trainer.py:765] (0/8) Epoch 34, batch 1400, train_loss[loss=3.294, NarTop10Accuracy=0.6736, over 6033.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5998.77 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (0/8) Epoch 34, batch 1500, train_loss[loss=3.124, NarTop10Accuracy=0.7027, over 6069.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 5945.30 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,600 INFO [trainer.py:765] (0/8) Epoch 34, batch 1600, train_loss[loss=2.917, NarTop10Accuracy=0.7397, over 7068.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5932.89 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (0/8) Epoch 34, batch 1700, train_loss[loss=3.24, NarTop10Accuracy=0.6832, over 6657.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7184, over 5903.10 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 22:05:41,721 INFO [trainer.py:765] (0/8) Epoch 34, batch 1800, train_loss[loss=3.27, NarTop10Accuracy=0.6698, over 7113.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7165, over 5975.24 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,206 INFO [trainer.py:765] (0/8) Epoch 34, batch 1900, train_loss[loss=3.097, NarTop10Accuracy=0.7145, over 6312.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7122, over 6029.04 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (0/8) Epoch 34, batch 2000, train_loss[loss=3.066, NarTop10Accuracy=0.7153, over 6369.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7149, over 6023.68 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (0/8) Epoch 34, batch 2100, train_loss[loss=3.209, NarTop10Accuracy=0.6788, over 4908.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7114, over 6002.39 frames. 
], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,399 INFO [trainer.py:765] (0/8) Epoch 34, batch 2200, train_loss[loss=2.816, NarTop10Accuracy=0.7689, over 7029.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7119, over 6029.05 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (0/8) Epoch 34, batch 2300, train_loss[loss=2.918, NarTop10Accuracy=0.76, over 5727.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7123, over 6051.24 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (0/8) Epoch 34, batch 2400, train_loss[loss=3.412, NarTop10Accuracy=0.6428, over 5088.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7127, over 5794.89 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (0/8) Epoch 34, batch 2500, train_loss[loss=2.675, NarTop10Accuracy=0.7916, over 5097.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5495.42 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,384 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 22:08:57,387 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-34.pt +2024-08-06 22:09:52,639 INFO [trainer.py:765] (0/8) Epoch 35, batch 100, train_loss[loss=2.951, NarTop10Accuracy=0.7332, over 7188.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7149, over 2357.24 frames. ], batch size: 31, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (0/8) Epoch 35, batch 200, train_loss[loss=3.096, NarTop10Accuracy=0.7112, over 6714.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 3845.27 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (0/8) Epoch 35, batch 300, train_loss[loss=2.854, NarTop10Accuracy=0.7566, over 7173.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 4655.41 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,332 INFO [trainer.py:765] (0/8) Epoch 35, batch 400, train_loss[loss=2.943, NarTop10Accuracy=0.7445, over 5352.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7195, over 5119.25 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,047 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (0/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30264MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (0/8) Epoch 35, batch 500, train_loss[loss=2.743, NarTop10Accuracy=0.776, over 6189.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 5402.06 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,424 INFO [trainer.py:765] (0/8) Epoch 35, batch 600, train_loss[loss=3.264, NarTop10Accuracy=0.6675, over 5757.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5665.08 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (0/8) Epoch 35, batch 700, train_loss[loss=2.803, NarTop10Accuracy=0.7694, over 5067.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5727.61 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,383 INFO [trainer.py:765] (0/8) Epoch 35, batch 800, train_loss[loss=2.734, NarTop10Accuracy=0.7795, over 5055.00 frames. 
], tot_loss[loss=3.045, NarTop10Accuracy=0.7164, over 5775.70 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (0/8) Epoch 35, batch 900, train_loss[loss=3.229, NarTop10Accuracy=0.6791, over 6366.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7186, over 5787.84 frames. ], batch size: 13, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (0/8) Epoch 35, batch 1000, train_loss[loss=2.907, NarTop10Accuracy=0.7482, over 6585.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5883.15 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (0/8) Epoch 35, batch 1100, train_loss[loss=2.987, NarTop10Accuracy=0.7304, over 6903.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5915.31 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,483 INFO [trainer.py:765] (0/8) Epoch 35, batch 1200, train_loss[loss=2.96, NarTop10Accuracy=0.7404, over 7242.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7191, over 5902.19 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (0/8) Epoch 35, batch 1300, train_loss[loss=2.947, NarTop10Accuracy=0.7462, over 5082.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7211, over 5973.27 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,060 INFO [trainer.py:765] (0/8) Epoch 35, batch 1400, train_loss[loss=3.072, NarTop10Accuracy=0.6995, over 6093.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7187, over 5991.44 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (0/8) Epoch 35, batch 1500, train_loss[loss=3.034, NarTop10Accuracy=0.7128, over 6102.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5971.20 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (0/8) Epoch 35, batch 1600, train_loss[loss=2.938, NarTop10Accuracy=0.7451, over 7209.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5959.90 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,319 INFO [trainer.py:765] (0/8) Epoch 35, batch 1700, train_loss[loss=2.769, NarTop10Accuracy=0.7678, over 6135.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5939.25 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,702 INFO [trainer.py:765] (0/8) Epoch 35, batch 1800, train_loss[loss=3.456, NarTop10Accuracy=0.6273, over 7035.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 6009.46 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (0/8) Epoch 35, batch 1900, train_loss[loss=3.192, NarTop10Accuracy=0.6897, over 5937.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 6027.41 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (0/8) Epoch 35, batch 2000, train_loss[loss=3.102, NarTop10Accuracy=0.7097, over 6543.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7157, over 6000.15 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (0/8) Epoch 35, batch 2100, train_loss[loss=2.749, NarTop10Accuracy=0.7863, over 4695.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7159, over 5966.40 frames. ], batch size: 5, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (0/8) Epoch 35, batch 2200, train_loss[loss=2.858, NarTop10Accuracy=0.7622, over 7515.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 6011.35 frames. 
], batch size: 33, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (0/8) Epoch 35, batch 2300, train_loss[loss=2.861, NarTop10Accuracy=0.7468, over 5742.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 6025.43 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (0/8) Epoch 35, batch 2400, train_loss[loss=3.239, NarTop10Accuracy=0.6721, over 5223.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5764.81 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,680 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (0/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 22:22:08,115 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,127 INFO [trainer.py:765] (0/8) Epoch 35, batch 2500, train_loss[loss=3.184, NarTop10Accuracy=0.685, over 5118.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7203, over 5478.79 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:46,663 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 22:22:46,666 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-35.pt +2024-08-06 22:23:47,171 INFO [trainer.py:765] (0/8) Epoch 36, batch 100, train_loss[loss=3.252, NarTop10Accuracy=0.6746, over 7521.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7266, over 2354.48 frames. ], batch size: 33, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (0/8) Epoch 36, batch 200, train_loss[loss=2.998, NarTop10Accuracy=0.7299, over 6846.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7204, over 3845.61 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,720 INFO [trainer.py:765] (0/8) Epoch 36, batch 300, train_loss[loss=3.267, NarTop10Accuracy=0.6597, over 7428.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7186, over 4643.41 frames. ], batch size: 23, lr: 2.37e-03 +2024-08-06 22:25:29,275 INFO [trainer.py:765] (0/8) Epoch 36, batch 400, train_loss[loss=2.964, NarTop10Accuracy=0.733, over 5010.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7219, over 5095.36 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (0/8) Epoch 36, batch 500, train_loss[loss=3.242, NarTop10Accuracy=0.6735, over 6114.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7215, over 5369.33 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,025 INFO [trainer.py:765] (0/8) Epoch 36, batch 600, train_loss[loss=2.986, NarTop10Accuracy=0.7288, over 5772.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7217, over 5649.62 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,990 INFO [trainer.py:765] (0/8) Epoch 36, batch 700, train_loss[loss=3.288, NarTop10Accuracy=0.6613, over 5058.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.722, over 5711.49 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,914 INFO [trainer.py:765] (0/8) Epoch 36, batch 800, train_loss[loss=3.369, NarTop10Accuracy=0.6516, over 5157.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7184, over 5755.67 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:28:17,811 INFO [trainer.py:765] (0/8) Epoch 36, batch 900, train_loss[loss=2.794, NarTop10Accuracy=0.7714, over 6612.00 frames. 
], tot_loss[loss=3.023, NarTop10Accuracy=0.721, over 5797.24 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:28:56,983 INFO [trainer.py:765] (0/8) Epoch 36, batch 1000, train_loss[loss=3.364, NarTop10Accuracy=0.6556, over 6609.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 5916.29 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (0/8) Epoch 36, batch 1100, train_loss[loss=2.8, NarTop10Accuracy=0.774, over 6861.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5940.63 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,680 INFO [trainer.py:765] (0/8) Epoch 36, batch 1200, train_loss[loss=3.103, NarTop10Accuracy=0.7028, over 6993.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7203, over 5935.40 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,575 INFO [trainer.py:765] (0/8) Epoch 36, batch 1300, train_loss[loss=2.951, NarTop10Accuracy=0.7383, over 4323.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7188, over 5995.39 frames. ], batch size: 5, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (0/8) Epoch 36, batch 1400, train_loss[loss=3.23, NarTop10Accuracy=0.6899, over 6072.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7214, over 6015.57 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (0/8) Epoch 36, batch 1500, train_loss[loss=3.454, NarTop10Accuracy=0.6235, over 6333.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7218, over 5956.44 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 22:32:11,459 INFO [trainer.py:765] (0/8) Epoch 36, batch 1600, train_loss[loss=3.425, NarTop10Accuracy=0.6343, over 7044.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 5946.79 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,108 INFO [trainer.py:765] (0/8) Epoch 36, batch 1700, train_loss[loss=3.395, NarTop10Accuracy=0.6444, over 6201.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 5925.71 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (0/8) Epoch 36, batch 1800, train_loss[loss=3.112, NarTop10Accuracy=0.6956, over 6993.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 5976.36 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,169 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (0/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (0/8) Epoch 36, batch 1900, train_loss[loss=2.982, NarTop10Accuracy=0.7291, over 6366.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7181, over 6012.50 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (0/8) Epoch 36, batch 2000, train_loss[loss=3.241, NarTop10Accuracy=0.6798, over 6261.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7195, over 5991.45 frames. ], batch size: 51, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (0/8) Epoch 36, batch 2100, train_loss[loss=2.767, NarTop10Accuracy=0.7699, over 4893.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7206, over 5990.26 frames. 
], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (0/8) Epoch 36, batch 2200, train_loss[loss=3.327, NarTop10Accuracy=0.654, over 7185.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 6029.57 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,146 INFO [trainer.py:765] (0/8) Epoch 36, batch 2300, train_loss[loss=3.461, NarTop10Accuracy=0.6395, over 5634.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7138, over 6038.87 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,601 INFO [trainer.py:765] (0/8) Epoch 36, batch 2400, train_loss[loss=3.235, NarTop10Accuracy=0.6767, over 5166.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.716, over 5792.30 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (0/8) Epoch 36, batch 2500, train_loss[loss=2.779, NarTop10Accuracy=0.7636, over 5145.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7195, over 5485.34 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:29,315 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 22:36:29,318 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-36.pt +2024-08-06 22:37:29,727 INFO [trainer.py:765] (0/8) Epoch 37, batch 100, train_loss[loss=2.78, NarTop10Accuracy=0.7697, over 7221.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7174, over 2350.44 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,273 INFO [trainer.py:765] (0/8) Epoch 37, batch 200, train_loss[loss=2.687, NarTop10Accuracy=0.7829, over 6858.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7211, over 3860.82 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,958 INFO [trainer.py:765] (0/8) Epoch 37, batch 300, train_loss[loss=3.213, NarTop10Accuracy=0.6845, over 7032.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7226, over 4654.83 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,308 INFO [trainer.py:765] (0/8) Epoch 37, batch 400, train_loss[loss=2.679, NarTop10Accuracy=0.7919, over 5025.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7247, over 5120.46 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,862 INFO [trainer.py:765] (0/8) Epoch 37, batch 500, train_loss[loss=3.388, NarTop10Accuracy=0.6394, over 6054.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.7249, over 5391.90 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,335 INFO [trainer.py:765] (0/8) Epoch 37, batch 600, train_loss[loss=2.735, NarTop10Accuracy=0.7772, over 5790.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.723, over 5649.52 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,618 INFO [trainer.py:765] (0/8) Epoch 37, batch 700, train_loss[loss=3.144, NarTop10Accuracy=0.702, over 4293.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7186, over 5733.19 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:41:30,566 INFO [trainer.py:765] (0/8) Epoch 37, batch 800, train_loss[loss=2.811, NarTop10Accuracy=0.7619, over 4956.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 5778.52 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,085 INFO [trainer.py:765] (0/8) Epoch 37, batch 900, train_loss[loss=2.905, NarTop10Accuracy=0.75, over 6162.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5804.61 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:42:38,268 INFO [trainer.py:765] (0/8) Epoch 37, batch 1000, train_loss[loss=3.279, NarTop10Accuracy=0.6652, over 6561.00 frames. 
], tot_loss[loss=3.042, NarTop10Accuracy=0.7165, over 5892.00 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (0/8) Epoch 37, batch 1100, train_loss[loss=3.076, NarTop10Accuracy=0.7112, over 6969.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7166, over 5940.44 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (0/8) Epoch 37, batch 1200, train_loss[loss=2.906, NarTop10Accuracy=0.7434, over 7488.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5939.64 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,755 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (0/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,785 INFO [trainer.py:765] (0/8) Epoch 37, batch 1300, train_loss[loss=2.863, NarTop10Accuracy=0.7552, over 5100.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7203, over 5999.56 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (0/8) Epoch 37, batch 1400, train_loss[loss=2.72, NarTop10Accuracy=0.7755, over 6468.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7216, over 6016.13 frames. ], batch size: 12, lr: 2.30e-03 +2024-08-06 22:45:40,512 INFO [trainer.py:765] (0/8) Epoch 37, batch 1500, train_loss[loss=2.947, NarTop10Accuracy=0.7393, over 6216.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7188, over 5949.66 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,438 INFO [trainer.py:765] (0/8) Epoch 37, batch 1600, train_loss[loss=3.359, NarTop10Accuracy=0.653, over 7104.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5929.61 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,187 INFO [trainer.py:765] (0/8) Epoch 37, batch 1700, train_loss[loss=3.473, NarTop10Accuracy=0.63, over 6207.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.718, over 5916.42 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 22:47:01,793 INFO [trainer.py:765] (0/8) Epoch 37, batch 1800, train_loss[loss=2.803, NarTop10Accuracy=0.7608, over 7101.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5978.77 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:47:28,312 INFO [trainer.py:765] (0/8) Epoch 37, batch 1900, train_loss[loss=3.176, NarTop10Accuracy=0.6877, over 6186.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7173, over 6020.79 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (0/8) Epoch 37, batch 2000, train_loss[loss=3.102, NarTop10Accuracy=0.7024, over 6123.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7184, over 5999.03 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,326 INFO [trainer.py:765] (0/8) Epoch 37, batch 2100, train_loss[loss=2.893, NarTop10Accuracy=0.7338, over 3894.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5967.60 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 22:48:44,708 INFO [trainer.py:765] (0/8) Epoch 37, batch 2200, train_loss[loss=2.919, NarTop10Accuracy=0.7389, over 7311.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 6001.26 frames. 
], batch size: 32, lr: 2.29e-03 +2024-08-06 22:49:09,913 INFO [trainer.py:765] (0/8) Epoch 37, batch 2300, train_loss[loss=2.755, NarTop10Accuracy=0.7755, over 5577.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7166, over 6007.28 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,318 INFO [trainer.py:765] (0/8) Epoch 37, batch 2400, train_loss[loss=3.133, NarTop10Accuracy=0.6955, over 5229.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7203, over 5762.43 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,861 INFO [trainer.py:765] (0/8) Epoch 37, batch 2500, train_loss[loss=3.159, NarTop10Accuracy=0.6838, over 5013.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.727, over 5454.12 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:18,222 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 22:50:18,227 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-37.pt +2024-08-06 22:51:16,151 INFO [trainer.py:765] (0/8) Epoch 38, batch 100, train_loss[loss=3.123, NarTop10Accuracy=0.699, over 7104.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7205, over 2378.19 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,014 INFO [trainer.py:765] (0/8) Epoch 38, batch 200, train_loss[loss=3.293, NarTop10Accuracy=0.6651, over 6765.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 3849.23 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,202 INFO [trainer.py:765] (0/8) Epoch 38, batch 300, train_loss[loss=2.911, NarTop10Accuracy=0.7498, over 7146.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 4652.97 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,626 INFO [trainer.py:765] (0/8) Epoch 38, batch 400, train_loss[loss=3.239, NarTop10Accuracy=0.6696, over 5247.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7208, over 5089.75 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,228 INFO [trainer.py:765] (0/8) Epoch 38, batch 500, train_loss[loss=2.744, NarTop10Accuracy=0.7815, over 6165.00 frames. ], tot_loss[loss=2.986, NarTop10Accuracy=0.7277, over 5359.36 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,497 INFO [trainer.py:765] (0/8) Epoch 38, batch 600, train_loss[loss=3.135, NarTop10Accuracy=0.7017, over 5562.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.7248, over 5641.47 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,002 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (0/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,658 INFO [trainer.py:765] (0/8) Epoch 38, batch 700, train_loss[loss=2.777, NarTop10Accuracy=0.765, over 5193.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7243, over 5730.92 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,937 INFO [trainer.py:765] (0/8) Epoch 38, batch 800, train_loss[loss=3.06, NarTop10Accuracy=0.7129, over 4221.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 5759.80 frames. ], batch size: 5, lr: 2.24e-03 +2024-08-06 22:55:59,702 INFO [trainer.py:765] (0/8) Epoch 38, batch 900, train_loss[loss=2.898, NarTop10Accuracy=0.7474, over 6255.00 frames. 
], tot_loss[loss=3.016, NarTop10Accuracy=0.7218, over 5794.93 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,090 INFO [trainer.py:765] (0/8) Epoch 38, batch 1000, train_loss[loss=3.243, NarTop10Accuracy=0.6749, over 6093.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7213, over 5920.90 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:57:08,990 INFO [trainer.py:765] (0/8) Epoch 38, batch 1100, train_loss[loss=3.215, NarTop10Accuracy=0.6852, over 6822.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7172, over 5937.97 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,661 INFO [trainer.py:765] (0/8) Epoch 38, batch 1200, train_loss[loss=2.826, NarTop10Accuracy=0.7552, over 7251.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7169, over 5936.21 frames. ], batch size: 32, lr: 2.24e-03 +2024-08-06 22:58:16,545 INFO [trainer.py:765] (0/8) Epoch 38, batch 1300, train_loss[loss=3.38, NarTop10Accuracy=0.6484, over 5181.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7162, over 5993.46 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (0/8) Epoch 38, batch 1400, train_loss[loss=2.938, NarTop10Accuracy=0.7425, over 5955.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7129, over 6016.31 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,854 INFO [trainer.py:765] (0/8) Epoch 38, batch 1500, train_loss[loss=3.513, NarTop10Accuracy=0.6256, over 6171.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5955.23 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,643 INFO [trainer.py:765] (0/8) Epoch 38, batch 1600, train_loss[loss=3.358, NarTop10Accuracy=0.6577, over 7047.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.717, over 5935.26 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,314 INFO [trainer.py:765] (0/8) Epoch 38, batch 1700, train_loss[loss=3.08, NarTop10Accuracy=0.7179, over 6702.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7132, over 5925.47 frames. ], batch size: 14, lr: 2.23e-03 +2024-08-06 23:00:43,763 INFO [trainer.py:765] (0/8) Epoch 38, batch 1800, train_loss[loss=3.177, NarTop10Accuracy=0.6918, over 6960.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7147, over 5974.34 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,191 INFO [trainer.py:765] (0/8) Epoch 38, batch 1900, train_loss[loss=3.53, NarTop10Accuracy=0.6258, over 5526.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7134, over 6006.09 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (0/8) Epoch 38, batch 2000, train_loss[loss=3.324, NarTop10Accuracy=0.6584, over 6408.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7139, over 5990.88 frames. ], batch size: 53, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (0/8) Epoch 38, batch 2100, train_loss[loss=2.968, NarTop10Accuracy=0.7213, over 4068.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.717, over 5963.07 frames. ], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,314 INFO [trainer.py:765] (0/8) Epoch 38, batch 2200, train_loss[loss=2.795, NarTop10Accuracy=0.7693, over 7173.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7183, over 5996.84 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,419 INFO [trainer.py:765] (0/8) Epoch 38, batch 2300, train_loss[loss=2.701, NarTop10Accuracy=0.7893, over 5748.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 6015.64 frames. 
], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,347 INFO [trainer.py:765] (0/8) Epoch 38, batch 2400, train_loss[loss=2.751, NarTop10Accuracy=0.7736, over 5118.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7199, over 5778.36 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,824 INFO [trainer.py:765] (0/8) Epoch 38, batch 2500, train_loss[loss=3.364, NarTop10Accuracy=0.641, over 5094.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7234, over 5487.70 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,636 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 23:03:59,638 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-38.pt +2024-08-06 23:04:58,941 INFO [trainer.py:765] (0/8) Epoch 39, batch 100, train_loss[loss=3.322, NarTop10Accuracy=0.6577, over 7263.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7274, over 2366.12 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,469 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (0/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (0/8) Epoch 39, batch 200, train_loss[loss=2.758, NarTop10Accuracy=0.7693, over 6672.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7266, over 3845.01 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (0/8) Epoch 39, batch 300, train_loss[loss=3.016, NarTop10Accuracy=0.7265, over 7113.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7275, over 4651.05 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,276 INFO [trainer.py:765] (0/8) Epoch 39, batch 400, train_loss[loss=2.899, NarTop10Accuracy=0.7433, over 5766.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7285, over 5101.46 frames. ], batch size: 8, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (0/8) Epoch 39, batch 500, train_loss[loss=3.358, NarTop10Accuracy=0.6523, over 6054.00 frames. ], tot_loss[loss=2.994, NarTop10Accuracy=0.7265, over 5384.28 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (0/8) Epoch 39, batch 600, train_loss[loss=2.662, NarTop10Accuracy=0.7865, over 5790.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7225, over 5649.98 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,695 INFO [trainer.py:765] (0/8) Epoch 39, batch 700, train_loss[loss=3.172, NarTop10Accuracy=0.6775, over 4263.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5728.22 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (0/8) Epoch 39, batch 800, train_loss[loss=2.719, NarTop10Accuracy=0.7768, over 5073.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7205, over 5794.26 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:38,866 INFO [trainer.py:765] (0/8) Epoch 39, batch 900, train_loss[loss=3.362, NarTop10Accuracy=0.6546, over 6588.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7221, over 5813.35 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (0/8) Epoch 39, batch 1000, train_loss[loss=2.785, NarTop10Accuracy=0.7649, over 6162.00 frames. 
], tot_loss[loss=3.008, NarTop10Accuracy=0.7237, over 5916.11 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (0/8) Epoch 39, batch 1100, train_loss[loss=2.794, NarTop10Accuracy=0.7749, over 6846.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7202, over 5939.74 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (0/8) Epoch 39, batch 1200, train_loss[loss=2.957, NarTop10Accuracy=0.7368, over 7392.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7231, over 5921.27 frames. ], batch size: 32, lr: 2.18e-03 +2024-08-06 23:12:07,253 INFO [trainer.py:765] (0/8) Epoch 39, batch 1300, train_loss[loss=2.832, NarTop10Accuracy=0.763, over 4992.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7231, over 5983.71 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,302 INFO [trainer.py:765] (0/8) Epoch 39, batch 1400, train_loss[loss=3.031, NarTop10Accuracy=0.7158, over 6153.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.722, over 6021.95 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (0/8) Epoch 39, batch 1500, train_loss[loss=3.547, NarTop10Accuracy=0.6093, over 6093.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.723, over 5965.83 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (0/8) Epoch 39, batch 1600, train_loss[loss=2.909, NarTop10Accuracy=0.7423, over 6873.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.7246, over 5947.17 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (0/8) Epoch 39, batch 1700, train_loss[loss=3.37, NarTop10Accuracy=0.6405, over 6312.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7188, over 5939.65 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 23:14:30,768 INFO [trainer.py:765] (0/8) Epoch 39, batch 1800, train_loss[loss=2.849, NarTop10Accuracy=0.7575, over 7008.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7187, over 6009.21 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (0/8) Epoch 39, batch 1900, train_loss[loss=2.985, NarTop10Accuracy=0.7342, over 5973.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7162, over 6043.25 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,751 INFO [trainer.py:765] (0/8) Epoch 39, batch 2000, train_loss[loss=3.317, NarTop10Accuracy=0.6617, over 6441.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 6013.44 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (0/8) Epoch 39, batch 2100, train_loss[loss=3.343, NarTop10Accuracy=0.6575, over 4731.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7185, over 5999.68 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-100000.pt +2024-08-06 23:15:55,412 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (0/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. +2024-08-06 23:16:02,156 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,653 INFO [trainer.py:765] (0/8) Epoch 39, batch 2200, train_loss[loss=3.189, NarTop10Accuracy=0.6877, over 7389.00 frames. 
], tot_loss[loss=3.029, NarTop10Accuracy=0.7198, over 6000.59 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (0/8) Epoch 39, batch 2300, train_loss[loss=2.836, NarTop10Accuracy=0.7645, over 5706.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7168, over 6019.09 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (0/8) Epoch 39, batch 2400, train_loss[loss=2.752, NarTop10Accuracy=0.7768, over 5133.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7211, over 5763.18 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (0/8) Epoch 39, batch 2500, train_loss[loss=2.806, NarTop10Accuracy=0.7707, over 5133.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7256, over 5477.56 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,435 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 23:17:56,438 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-39.pt +2024-08-06 23:18:48,946 INFO [trainer.py:765] (0/8) Epoch 40, batch 100, train_loss[loss=2.964, NarTop10Accuracy=0.728, over 7278.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7263, over 2368.03 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (0/8) Epoch 40, batch 200, train_loss[loss=2.576, NarTop10Accuracy=0.8148, over 6933.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7284, over 3875.96 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (0/8) Epoch 40, batch 300, train_loss[loss=2.892, NarTop10Accuracy=0.7645, over 7047.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.7241, over 4678.12 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (0/8) Epoch 40, batch 400, train_loss[loss=2.812, NarTop10Accuracy=0.7721, over 5064.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7237, over 5121.13 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (0/8) Epoch 40, batch 500, train_loss[loss=2.719, NarTop10Accuracy=0.7716, over 6096.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7224, over 5396.55 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,881 INFO [trainer.py:765] (0/8) Epoch 40, batch 600, train_loss[loss=2.975, NarTop10Accuracy=0.7358, over 5670.00 frames. ], tot_loss[loss=3.002, NarTop10Accuracy=0.7248, over 5653.13 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (0/8) Epoch 40, batch 700, train_loss[loss=3.177, NarTop10Accuracy=0.6928, over 5022.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7236, over 5705.80 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,753 INFO [trainer.py:765] (0/8) Epoch 40, batch 800, train_loss[loss=2.687, NarTop10Accuracy=0.791, over 4992.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 5781.59 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (0/8) Epoch 40, batch 900, train_loss[loss=3.46, NarTop10Accuracy=0.6333, over 6288.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7213, over 5786.92 frames. ], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (0/8) Epoch 40, batch 1000, train_loss[loss=3.377, NarTop10Accuracy=0.6535, over 6666.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7201, over 5896.48 frames. 
], batch size: 14, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (0/8) Epoch 40, batch 1100, train_loss[loss=2.757, NarTop10Accuracy=0.7834, over 7080.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5955.34 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (0/8) Epoch 40, batch 1200, train_loss[loss=2.994, NarTop10Accuracy=0.7242, over 6966.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7215, over 5934.81 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (0/8) Epoch 40, batch 1300, train_loss[loss=2.705, NarTop10Accuracy=0.7792, over 5040.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7232, over 5999.37 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,384 INFO [trainer.py:765] (0/8) Epoch 40, batch 1400, train_loss[loss=2.913, NarTop10Accuracy=0.7478, over 6195.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 6028.53 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (0/8) Epoch 40, batch 1500, train_loss[loss=3.218, NarTop10Accuracy=0.6857, over 6384.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.723, over 5977.39 frames. ], batch size: 53, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (0/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30377MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (0/8) Epoch 40, batch 1600, train_loss[loss=2.888, NarTop10Accuracy=0.7514, over 7035.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7215, over 5944.04 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,057 INFO [trainer.py:765] (0/8) Epoch 40, batch 1700, train_loss[loss=3.489, NarTop10Accuracy=0.6228, over 6528.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7209, over 5929.79 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 23:28:12,579 INFO [trainer.py:765] (0/8) Epoch 40, batch 1800, train_loss[loss=3.083, NarTop10Accuracy=0.7111, over 7194.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7253, over 6005.15 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (0/8) Epoch 40, batch 1900, train_loss[loss=3.144, NarTop10Accuracy=0.6979, over 6165.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7241, over 6033.11 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,445 INFO [trainer.py:765] (0/8) Epoch 40, batch 2000, train_loss[loss=3.533, NarTop10Accuracy=0.6157, over 5862.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 6015.70 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (0/8) Epoch 40, batch 2100, train_loss[loss=2.802, NarTop10Accuracy=0.771, over 4845.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.723, over 5995.82 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 23:29:54,940 INFO [trainer.py:765] (0/8) Epoch 40, batch 2200, train_loss[loss=3.09, NarTop10Accuracy=0.7064, over 7146.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7204, over 6011.48 frames. 
], batch size: 31, lr: 2.11e-03 +2024-08-06 23:30:20,013 INFO [trainer.py:765] (0/8) Epoch 40, batch 2300, train_loss[loss=2.779, NarTop10Accuracy=0.7556, over 5679.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.719, over 6015.89 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,296 INFO [trainer.py:765] (0/8) Epoch 40, batch 2400, train_loss[loss=2.797, NarTop10Accuracy=0.77, over 5193.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5772.23 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (0/8) Epoch 40, batch 2500, train_loss[loss=3.059, NarTop10Accuracy=0.7101, over 5094.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7266, over 5462.68 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,336 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 23:31:27,339 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-40.pt +2024-08-06 23:31:34,264 INFO [trainer.py:1069] (0/8) Done!
diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-1 b/libritts-r/log/log-train-2024-08-06-14-23-41-1
new file mode 100644
index 0000000000000000000000000000000000000000..d28f4dba1e7efc50b7c7144f6e27cac2ce95037f
--- /dev/null
+++ b/libritts-r/log/log-train-2024-08-06-14-23-41-1
@@ -0,0 +1,1260 @@
+2024-08-06 14:23:41,788 INFO [trainer.py:870] (1/8) Training started +2024-08-06 14:23:41,789 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 14:23:41,789 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset':
'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,789 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 14:23:42,552 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,552 INFO [checkpoint.py:112] (1/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,458 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 14:23:49,641 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 14:23:49,643 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 14:23:49,644 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 14:23:49,644 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 14:23:49,645 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,266 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 14:23:50,267 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 14:23:50,600 INFO [datamodule.py:388] (1/8) About to create dev dataloader +2024-08-06 14:24:38,249 INFO [trainer.py:765] (1/8) Epoch 1, batch 100, train_loss[loss=105.1, NarTop10Accuracy=0.01887, over 7386.00 frames. ], tot_loss[loss=74.58, NarTop10Accuracy=0.04796, over 2371.63 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (1/8) Epoch 1, batch 200, train_loss[loss=136.9, NarTop10Accuracy=0.01535, over 6936.00 frames. ], tot_loss[loss=98, NarTop10Accuracy=0.04192, over 3863.19 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,111 INFO [trainer.py:765] (1/8) Epoch 1, batch 300, train_loss[loss=105.6, NarTop10Accuracy=0.02372, over 6975.00 frames. ], tot_loss[loss=85.32, NarTop10Accuracy=0.04288, over 4656.54 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,482 INFO [trainer.py:765] (1/8) Epoch 1, batch 400, train_loss[loss=53.59, NarTop10Accuracy=0.01745, over 5070.00 frames. ], tot_loss[loss=67.84, NarTop10Accuracy=0.04713, over 5111.55 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,357 INFO [trainer.py:765] (1/8) Epoch 1, batch 500, train_loss[loss=14.57, NarTop10Accuracy=0.02618, over 6162.00 frames. ], tot_loss[loss=49.03, NarTop10Accuracy=0.05107, over 5383.33 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,000 INFO [trainer.py:765] (1/8) Epoch 1, batch 600, train_loss[loss=6.264, NarTop10Accuracy=0.1547, over 6210.00 frames. ], tot_loss[loss=33.34, NarTop10Accuracy=0.05558, over 5666.53 frames. ], batch size: 10, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (1/8) Epoch 1, batch 700, train_loss[loss=6.77, NarTop10Accuracy=0.1121, over 4221.00 frames. ], tot_loss[loss=23.38, NarTop10Accuracy=0.06406, over 5724.39 frames. ], batch size: 5, lr: 2.99e-02 +2024-08-06 14:28:08,831 INFO [trainer.py:765] (1/8) Epoch 1, batch 800, train_loss[loss=6.379, NarTop10Accuracy=0.1529, over 5031.00 frames. ], tot_loss[loss=17.18, NarTop10Accuracy=0.08526, over 5761.81 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,758 INFO [trainer.py:765] (1/8) Epoch 1, batch 900, train_loss[loss=5.81, NarTop10Accuracy=0.1604, over 6342.00 frames. ], tot_loss[loss=12.78, NarTop10Accuracy=0.1119, over 5807.44 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,586 INFO [trainer.py:765] (1/8) Epoch 1, batch 1000, train_loss[loss=5.72, NarTop10Accuracy=0.1873, over 6681.00 frames. ], tot_loss[loss=10.09, NarTop10Accuracy=0.1341, over 5909.13 frames. 
], batch size: 14, lr: 2.97e-02 +2024-08-06 14:29:42,825 INFO [trainer.py:765] (1/8) Epoch 1, batch 1100, train_loss[loss=5.576, NarTop10Accuracy=0.2159, over 6795.00 frames. ], tot_loss[loss=8.411, NarTop10Accuracy=0.153, over 5934.85 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,468 INFO [trainer.py:765] (1/8) Epoch 1, batch 1200, train_loss[loss=5.92, NarTop10Accuracy=0.1551, over 7311.00 frames. ], tot_loss[loss=7.351, NarTop10Accuracy=0.1702, over 5922.73 frames. ], batch size: 32, lr: 2.96e-02 +2024-08-06 14:30:48,747 INFO [trainer.py:765] (1/8) Epoch 1, batch 1300, train_loss[loss=5.12, NarTop10Accuracy=0.3, over 4455.00 frames. ], tot_loss[loss=6.673, NarTop10Accuracy=0.1875, over 6005.12 frames. ], batch size: 5, lr: 2.95e-02 +2024-08-06 14:31:18,143 INFO [trainer.py:765] (1/8) Epoch 1, batch 1400, train_loss[loss=5.67, NarTop10Accuracy=0.1867, over 6117.00 frames. ], tot_loss[loss=6.248, NarTop10Accuracy=0.1975, over 6028.89 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,026 INFO [trainer.py:765] (1/8) Epoch 1, batch 1500, train_loss[loss=5.814, NarTop10Accuracy=0.1693, over 6342.00 frames. ], tot_loss[loss=5.973, NarTop10Accuracy=0.2081, over 5971.60 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 14:32:13,691 INFO [trainer.py:765] (1/8) Epoch 1, batch 1600, train_loss[loss=5.551, NarTop10Accuracy=0.2154, over 7086.00 frames. ], tot_loss[loss=5.791, NarTop10Accuracy=0.2173, over 5944.23 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,198 INFO [trainer.py:765] (1/8) Epoch 1, batch 1700, train_loss[loss=5.398, NarTop10Accuracy=0.2549, over 6663.00 frames. ], tot_loss[loss=5.672, NarTop10Accuracy=0.2242, over 5909.80 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 14:33:06,499 INFO [trainer.py:765] (1/8) Epoch 1, batch 1800, train_loss[loss=5.449, NarTop10Accuracy=0.2401, over 7149.00 frames. ], tot_loss[loss=5.579, NarTop10Accuracy=0.2326, over 5980.24 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (1/8) Epoch 1, batch 1900, train_loss[loss=5.585, NarTop10Accuracy=0.2097, over 6150.00 frames. ], tot_loss[loss=5.514, NarTop10Accuracy=0.2396, over 6024.68 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 14:33:58,014 INFO [trainer.py:765] (1/8) Epoch 1, batch 2000, train_loss[loss=5.457, NarTop10Accuracy=0.2511, over 5736.00 frames. ], tot_loss[loss=5.446, NarTop10Accuracy=0.2498, over 6003.38 frames. ], batch size: 51, lr: 2.89e-02 +2024-08-06 14:33:58,016 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (1/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 26863MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,061 INFO [trainer.py:765] (1/8) Epoch 1, batch 2100, train_loss[loss=4.95, NarTop10Accuracy=0.3428, over 3948.00 frames. ], tot_loss[loss=5.382, NarTop10Accuracy=0.2605, over 5984.71 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 14:34:57,303 INFO [trainer.py:765] (1/8) Epoch 1, batch 2200, train_loss[loss=5.446, NarTop10Accuracy=0.2452, over 7416.00 frames. ], tot_loss[loss=5.349, NarTop10Accuracy=0.2652, over 6018.35 frames. 
], batch size: 31, lr: 2.87e-02 +2024-08-06 14:35:22,455 INFO [trainer.py:765] (1/8) Epoch 1, batch 2300, train_loss[loss=5.358, NarTop10Accuracy=0.2628, over 5661.00 frames. ], tot_loss[loss=5.336, NarTop10Accuracy=0.267, over 6004.60 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,815 INFO [trainer.py:765] (1/8) Epoch 1, batch 2400, train_loss[loss=5.225, NarTop10Accuracy=0.2812, over 5280.00 frames. ], tot_loss[loss=5.284, NarTop10Accuracy=0.2765, over 5772.29 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (1/8) Epoch 1, batch 2500, train_loss[loss=5.069, NarTop10Accuracy=0.3156, over 5217.00 frames. ], tot_loss[loss=5.223, NarTop10Accuracy=0.2879, over 5476.61 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,006 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 14:37:29,669 INFO [trainer.py:765] (1/8) Epoch 2, batch 100, train_loss[loss=4.988, NarTop10Accuracy=0.3371, over 7368.00 frames. ], tot_loss[loss=5.188, NarTop10Accuracy=0.2946, over 2359.01 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,014 INFO [trainer.py:765] (1/8) Epoch 2, batch 200, train_loss[loss=5.279, NarTop10Accuracy=0.277, over 6852.00 frames. ], tot_loss[loss=5.169, NarTop10Accuracy=0.2976, over 3851.85 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (1/8) Epoch 2, batch 300, train_loss[loss=4.977, NarTop10Accuracy=0.3384, over 7173.00 frames. ], tot_loss[loss=5.142, NarTop10Accuracy=0.3018, over 4637.45 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:06,999 INFO [trainer.py:765] (1/8) Epoch 2, batch 400, train_loss[loss=4.804, NarTop10Accuracy=0.378, over 5109.00 frames. ], tot_loss[loss=5.104, NarTop10Accuracy=0.3085, over 5104.14 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,119 INFO [trainer.py:765] (1/8) Epoch 2, batch 500, train_loss[loss=4.931, NarTop10Accuracy=0.3444, over 6087.00 frames. ], tot_loss[loss=5.068, NarTop10Accuracy=0.3162, over 5369.66 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,083 INFO [trainer.py:765] (1/8) Epoch 2, batch 600, train_loss[loss=4.953, NarTop10Accuracy=0.3566, over 5598.00 frames. ], tot_loss[loss=5.04, NarTop10Accuracy=0.3216, over 5637.00 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (1/8) Epoch 2, batch 700, train_loss[loss=5.128, NarTop10Accuracy=0.2983, over 4293.00 frames. ], tot_loss[loss=5.023, NarTop10Accuracy=0.3245, over 5712.10 frames. ], batch size: 5, lr: 2.70e-02 +2024-08-06 14:41:24,514 INFO [trainer.py:765] (1/8) Epoch 2, batch 800, train_loss[loss=5.058, NarTop10Accuracy=0.3157, over 5208.00 frames. ], tot_loss[loss=5.004, NarTop10Accuracy=0.3279, over 5769.80 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 14:41:54,404 INFO [trainer.py:765] (1/8) Epoch 2, batch 900, train_loss[loss=4.71, NarTop10Accuracy=0.3832, over 6624.00 frames. ], tot_loss[loss=4.966, NarTop10Accuracy=0.3353, over 5798.53 frames. ], batch size: 14, lr: 2.68e-02 +2024-08-06 14:42:23,902 INFO [trainer.py:765] (1/8) Epoch 2, batch 1000, train_loss[loss=4.85, NarTop10Accuracy=0.3651, over 6837.00 frames. ], tot_loss[loss=4.938, NarTop10Accuracy=0.3408, over 5901.76 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 14:42:56,254 INFO [trainer.py:765] (1/8) Epoch 2, batch 1100, train_loss[loss=4.879, NarTop10Accuracy=0.3463, over 7116.00 frames. ], tot_loss[loss=4.934, NarTop10Accuracy=0.3416, over 5928.58 frames. 
], batch size: 18, lr: 2.65e-02 +2024-08-06 14:43:35,186 INFO [trainer.py:765] (1/8) Epoch 2, batch 1200, train_loss[loss=4.832, NarTop10Accuracy=0.3596, over 7599.00 frames. ], tot_loss[loss=4.901, NarTop10Accuracy=0.3476, over 5935.33 frames. ], batch size: 32, lr: 2.64e-02 +2024-08-06 14:44:04,345 INFO [trainer.py:765] (1/8) Epoch 2, batch 1300, train_loss[loss=4.855, NarTop10Accuracy=0.3489, over 5085.00 frames. ], tot_loss[loss=4.864, NarTop10Accuracy=0.3546, over 6013.20 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,727 INFO [trainer.py:765] (1/8) Epoch 2, batch 1400, train_loss[loss=5.003, NarTop10Accuracy=0.3238, over 6114.00 frames. ], tot_loss[loss=4.852, NarTop10Accuracy=0.3567, over 6023.15 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,441 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (1/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (1/8) Epoch 2, batch 1500, train_loss[loss=4.755, NarTop10Accuracy=0.3763, over 5796.00 frames. ], tot_loss[loss=4.825, NarTop10Accuracy=0.3618, over 5947.03 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,659 INFO [trainer.py:765] (1/8) Epoch 2, batch 1600, train_loss[loss=4.696, NarTop10Accuracy=0.3896, over 7335.00 frames. ], tot_loss[loss=4.804, NarTop10Accuracy=0.366, over 5921.42 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (1/8) Epoch 2, batch 1700, train_loss[loss=4.844, NarTop10Accuracy=0.362, over 6141.00 frames. ], tot_loss[loss=4.798, NarTop10Accuracy=0.3665, over 5904.52 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (1/8) Epoch 2, batch 1800, train_loss[loss=4.834, NarTop10Accuracy=0.3564, over 7107.00 frames. ], tot_loss[loss=4.773, NarTop10Accuracy=0.3713, over 5980.89 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (1/8) Epoch 2, batch 1900, train_loss[loss=4.725, NarTop10Accuracy=0.3842, over 6390.00 frames. ], tot_loss[loss=4.749, NarTop10Accuracy=0.376, over 6019.87 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 14:47:23,233 INFO [trainer.py:765] (1/8) Epoch 2, batch 2000, train_loss[loss=4.865, NarTop10Accuracy=0.3492, over 6477.00 frames. ], tot_loss[loss=4.727, NarTop10Accuracy=0.3801, over 5993.84 frames. ], batch size: 50, lr: 2.54e-02 +2024-08-06 14:47:48,589 INFO [trainer.py:765] (1/8) Epoch 2, batch 2100, train_loss[loss=4.816, NarTop10Accuracy=0.369, over 4008.00 frames. ], tot_loss[loss=4.718, NarTop10Accuracy=0.3819, over 5966.03 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (1/8) Epoch 2, batch 2200, train_loss[loss=4.75, NarTop10Accuracy=0.3687, over 7398.00 frames. ], tot_loss[loss=4.681, NarTop10Accuracy=0.3893, over 5994.19 frames. ], batch size: 32, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (1/8) Epoch 2, batch 2300, train_loss[loss=4.805, NarTop10Accuracy=0.3684, over 5784.00 frames. ], tot_loss[loss=4.686, NarTop10Accuracy=0.388, over 6019.97 frames. 
], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,320 INFO [trainer.py:765] (1/8) Epoch 2, batch 2400, train_loss[loss=4.233, NarTop10Accuracy=0.4782, over 4998.00 frames. ], tot_loss[loss=4.651, NarTop10Accuracy=0.3952, over 5784.93 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (1/8) Epoch 2, batch 2500, train_loss[loss=4.84, NarTop10Accuracy=0.3573, over 5154.00 frames. ], tot_loss[loss=4.618, NarTop10Accuracy=0.4012, over 5491.63 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,729 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 14:50:51,117 INFO [trainer.py:765] (1/8) Epoch 3, batch 100, train_loss[loss=4.724, NarTop10Accuracy=0.3771, over 7014.00 frames. ], tot_loss[loss=4.584, NarTop10Accuracy=0.4074, over 2370.50 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,388 INFO [trainer.py:765] (1/8) Epoch 3, batch 200, train_loss[loss=4.609, NarTop10Accuracy=0.3925, over 6879.00 frames. ], tot_loss[loss=4.541, NarTop10Accuracy=0.4164, over 3860.05 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,954 INFO [trainer.py:765] (1/8) Epoch 3, batch 300, train_loss[loss=4.762, NarTop10Accuracy=0.3737, over 7164.00 frames. ], tot_loss[loss=4.514, NarTop10Accuracy=0.4217, over 4639.81 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (1/8) Epoch 3, batch 400, train_loss[loss=4.558, NarTop10Accuracy=0.4129, over 4980.00 frames. ], tot_loss[loss=4.494, NarTop10Accuracy=0.4256, over 5076.60 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (1/8) Epoch 3, batch 500, train_loss[loss=4.475, NarTop10Accuracy=0.4354, over 6501.00 frames. ], tot_loss[loss=4.489, NarTop10Accuracy=0.4262, over 5381.19 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 14:53:29,551 INFO [trainer.py:765] (1/8) Epoch 3, batch 600, train_loss[loss=4.167, NarTop10Accuracy=0.4923, over 5769.00 frames. ], tot_loss[loss=4.472, NarTop10Accuracy=0.4301, over 5647.48 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,466 INFO [trainer.py:765] (1/8) Epoch 3, batch 700, train_loss[loss=4.356, NarTop10Accuracy=0.4423, over 4893.00 frames. ], tot_loss[loss=4.451, NarTop10Accuracy=0.4341, over 5705.99 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (1/8) Epoch 3, batch 800, train_loss[loss=4.314, NarTop10Accuracy=0.4638, over 4320.00 frames. ], tot_loss[loss=4.43, NarTop10Accuracy=0.4384, over 5770.15 frames. ], batch size: 5, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (1/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,051 INFO [trainer.py:765] (1/8) Epoch 3, batch 900, train_loss[loss=4.188, NarTop10Accuracy=0.4813, over 6279.00 frames. ], tot_loss[loss=4.404, NarTop10Accuracy=0.4437, over 5783.89 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,957 INFO [trainer.py:765] (1/8) Epoch 3, batch 1000, train_loss[loss=4.248, NarTop10Accuracy=0.4859, over 6522.00 frames. ], tot_loss[loss=4.381, NarTop10Accuracy=0.448, over 5901.79 frames. 
], batch size: 14, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (1/8) Epoch 3, batch 1100, train_loss[loss=4.577, NarTop10Accuracy=0.4033, over 6750.00 frames. ], tot_loss[loss=4.358, NarTop10Accuracy=0.4525, over 5938.30 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (1/8) Epoch 3, batch 1200, train_loss[loss=4.429, NarTop10Accuracy=0.4404, over 7092.00 frames. ], tot_loss[loss=4.338, NarTop10Accuracy=0.4562, over 5923.78 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,630 INFO [trainer.py:765] (1/8) Epoch 3, batch 1300, train_loss[loss=4.184, NarTop10Accuracy=0.4836, over 4329.00 frames. ], tot_loss[loss=4.313, NarTop10Accuracy=0.4612, over 5981.83 frames. ], batch size: 5, lr: 2.22e-02 +2024-08-06 14:58:22,899 INFO [trainer.py:765] (1/8) Epoch 3, batch 1400, train_loss[loss=4.058, NarTop10Accuracy=0.5129, over 6177.00 frames. ], tot_loss[loss=4.294, NarTop10Accuracy=0.4646, over 6012.60 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,854 INFO [trainer.py:765] (1/8) Epoch 3, batch 1500, train_loss[loss=4.312, NarTop10Accuracy=0.4692, over 6168.00 frames. ], tot_loss[loss=4.274, NarTop10Accuracy=0.4682, over 5953.88 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 14:59:18,714 INFO [trainer.py:765] (1/8) Epoch 3, batch 1600, train_loss[loss=4.011, NarTop10Accuracy=0.5185, over 7152.00 frames. ], tot_loss[loss=4.251, NarTop10Accuracy=0.4725, over 5925.80 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,952 INFO [trainer.py:765] (1/8) Epoch 3, batch 1700, train_loss[loss=4.066, NarTop10Accuracy=0.5119, over 6636.00 frames. ], tot_loss[loss=4.225, NarTop10Accuracy=0.478, over 5919.63 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,497 INFO [trainer.py:765] (1/8) Epoch 3, batch 1800, train_loss[loss=3.905, NarTop10Accuracy=0.541, over 7047.00 frames. ], tot_loss[loss=4.207, NarTop10Accuracy=0.4817, over 5976.05 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,948 INFO [trainer.py:765] (1/8) Epoch 3, batch 1900, train_loss[loss=4.59, NarTop10Accuracy=0.4064, over 5799.00 frames. ], tot_loss[loss=4.191, NarTop10Accuracy=0.4851, over 6008.77 frames. ], batch size: 50, lr: 2.16e-02 +2024-08-06 15:01:04,605 INFO [trainer.py:765] (1/8) Epoch 3, batch 2000, train_loss[loss=4.395, NarTop10Accuracy=0.4384, over 6627.00 frames. ], tot_loss[loss=4.164, NarTop10Accuracy=0.4912, over 5989.30 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,898 INFO [trainer.py:765] (1/8) Epoch 3, batch 2100, train_loss[loss=3.841, NarTop10Accuracy=0.5557, over 3891.00 frames. ], tot_loss[loss=4.14, NarTop10Accuracy=0.4959, over 5959.90 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,181 INFO [trainer.py:765] (1/8) Epoch 3, batch 2200, train_loss[loss=3.948, NarTop10Accuracy=0.5378, over 7368.00 frames. ], tot_loss[loss=4.112, NarTop10Accuracy=0.5018, over 6001.30 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,409 INFO [trainer.py:765] (1/8) Epoch 3, batch 2300, train_loss[loss=4.186, NarTop10Accuracy=0.4891, over 5709.00 frames. ], tot_loss[loss=4.122, NarTop10Accuracy=0.4995, over 6017.42 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,663 INFO [trainer.py:765] (1/8) Epoch 3, batch 2400, train_loss[loss=4.255, NarTop10Accuracy=0.46, over 5064.00 frames. ], tot_loss[loss=4.099, NarTop10Accuracy=0.5041, over 5779.38 frames. 
], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (1/8) Epoch 3, batch 2500, train_loss[loss=3.838, NarTop10Accuracy=0.558, over 5145.00 frames. ], tot_loss[loss=4.036, NarTop10Accuracy=0.5169, over 5477.61 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,311 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:04:28,130 INFO [trainer.py:765] (1/8) Epoch 4, batch 100, train_loss[loss=3.795, NarTop10Accuracy=0.5717, over 7302.00 frames. ], tot_loss[loss=4.031, NarTop10Accuracy=0.5177, over 2373.17 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,841 INFO [trainer.py:765] (1/8) Epoch 4, batch 200, train_loss[loss=3.884, NarTop10Accuracy=0.5462, over 6813.00 frames. ], tot_loss[loss=4.002, NarTop10Accuracy=0.5241, over 3861.09 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,507 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:05:36,237 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,888 INFO [trainer.py:765] (1/8) Epoch 4, batch 300, train_loss[loss=3.774, NarTop10Accuracy=0.5765, over 6966.00 frames. ], tot_loss[loss=3.992, NarTop10Accuracy=0.5258, over 4655.28 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,123 INFO [trainer.py:765] (1/8) Epoch 4, batch 400, train_loss[loss=3.768, NarTop10Accuracy=0.579, over 5055.00 frames. ], tot_loss[loss=4.006, NarTop10Accuracy=0.5232, over 5100.95 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,472 INFO [trainer.py:765] (1/8) Epoch 4, batch 500, train_loss[loss=4.167, NarTop10Accuracy=0.4816, over 6276.00 frames. ], tot_loss[loss=3.982, NarTop10Accuracy=0.5278, over 5377.94 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,817 INFO [trainer.py:765] (1/8) Epoch 4, batch 600, train_loss[loss=3.662, NarTop10Accuracy=0.6042, over 5709.00 frames. ], tot_loss[loss=3.968, NarTop10Accuracy=0.5307, over 5655.68 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (1/8) Epoch 4, batch 700, train_loss[loss=4.256, NarTop10Accuracy=0.4754, over 4995.00 frames. ], tot_loss[loss=3.971, NarTop10Accuracy=0.5298, over 5736.67 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (1/8) Epoch 4, batch 800, train_loss[loss=3.526, NarTop10Accuracy=0.6195, over 5058.00 frames. ], tot_loss[loss=3.96, NarTop10Accuracy=0.5323, over 5784.99 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,688 INFO [trainer.py:765] (1/8) Epoch 4, batch 900, train_loss[loss=3.608, NarTop10Accuracy=0.6022, over 6135.00 frames. ], tot_loss[loss=3.922, NarTop10Accuracy=0.54, over 5806.99 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,075 INFO [trainer.py:765] (1/8) Epoch 4, batch 1000, train_loss[loss=3.583, NarTop10Accuracy=0.6124, over 6258.00 frames. ], tot_loss[loss=3.915, NarTop10Accuracy=0.5416, over 5908.00 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,138 INFO [trainer.py:765] (1/8) Epoch 4, batch 1100, train_loss[loss=3.737, NarTop10Accuracy=0.5776, over 6810.00 frames. ], tot_loss[loss=3.91, NarTop10Accuracy=0.5427, over 5938.60 frames. 
], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,074 INFO [trainer.py:765] (1/8) Epoch 4, batch 1200, train_loss[loss=4.257, NarTop10Accuracy=0.4645, over 7092.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5442, over 5928.01 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,073 INFO [trainer.py:765] (1/8) Epoch 4, batch 1300, train_loss[loss=3.505, NarTop10Accuracy=0.6245, over 4977.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5532, over 5994.56 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,687 INFO [trainer.py:765] (1/8) Epoch 4, batch 1400, train_loss[loss=3.776, NarTop10Accuracy=0.5839, over 6087.00 frames. ], tot_loss[loss=3.857, NarTop10Accuracy=0.5534, over 6014.16 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (1/8) Epoch 4, batch 1500, train_loss[loss=3.838, NarTop10Accuracy=0.5622, over 5802.00 frames. ], tot_loss[loss=3.857, NarTop10Accuracy=0.5532, over 5955.71 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,509 INFO [trainer.py:765] (1/8) Epoch 4, batch 1600, train_loss[loss=3.773, NarTop10Accuracy=0.5689, over 6972.00 frames. ], tot_loss[loss=3.848, NarTop10Accuracy=0.5554, over 5919.53 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,132 INFO [trainer.py:765] (1/8) Epoch 4, batch 1700, train_loss[loss=3.747, NarTop10Accuracy=0.5794, over 6333.00 frames. ], tot_loss[loss=3.829, NarTop10Accuracy=0.5597, over 5912.60 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 15:13:54,556 INFO [trainer.py:765] (1/8) Epoch 4, batch 1800, train_loss[loss=3.778, NarTop10Accuracy=0.5771, over 7107.00 frames. ], tot_loss[loss=3.835, NarTop10Accuracy=0.5582, over 5977.35 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,997 INFO [trainer.py:765] (1/8) Epoch 4, batch 1900, train_loss[loss=3.748, NarTop10Accuracy=0.5852, over 6405.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5536, over 6021.15 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 15:14:46,671 INFO [trainer.py:765] (1/8) Epoch 4, batch 2000, train_loss[loss=3.803, NarTop10Accuracy=0.5592, over 6174.00 frames. ], tot_loss[loss=3.828, NarTop10Accuracy=0.5593, over 5991.10 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,858 INFO [trainer.py:765] (1/8) Epoch 4, batch 2100, train_loss[loss=3.473, NarTop10Accuracy=0.6247, over 3804.00 frames. ], tot_loss[loss=3.815, NarTop10Accuracy=0.5618, over 5976.64 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 15:15:37,088 INFO [trainer.py:765] (1/8) Epoch 4, batch 2200, train_loss[loss=3.601, NarTop10Accuracy=0.6104, over 7656.00 frames. ], tot_loss[loss=3.806, NarTop10Accuracy=0.5636, over 6011.79 frames. ], batch size: 32, lr: 1.80e-02 +2024-08-06 15:15:55,088 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. +2024-08-06 15:16:03,243 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:16:03,740 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (1/8) Epoch 4, batch 2300, train_loss[loss=3.68, NarTop10Accuracy=0.5964, over 5628.00 frames. ], tot_loss[loss=3.816, NarTop10Accuracy=0.5616, over 6014.31 frames. 
], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (1/8) Epoch 4, batch 2400, train_loss[loss=3.425, NarTop10Accuracy=0.6501, over 5139.00 frames. ], tot_loss[loss=3.779, NarTop10Accuracy=0.5691, over 5757.92 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,534 INFO [trainer.py:765] (1/8) Epoch 4, batch 2500, train_loss[loss=3.52, NarTop10Accuracy=0.6293, over 5199.00 frames. ], tot_loss[loss=3.767, NarTop10Accuracy=0.5713, over 5480.65 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,901 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:18:24,101 INFO [trainer.py:765] (1/8) Epoch 5, batch 100, train_loss[loss=3.506, NarTop10Accuracy=0.6341, over 7452.00 frames. ], tot_loss[loss=3.777, NarTop10Accuracy=0.5702, over 2361.88 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (1/8) Epoch 5, batch 200, train_loss[loss=4.102, NarTop10Accuracy=0.5003, over 6678.00 frames. ], tot_loss[loss=3.751, NarTop10Accuracy=0.5752, over 3846.93 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,888 INFO [trainer.py:765] (1/8) Epoch 5, batch 300, train_loss[loss=4.004, NarTop10Accuracy=0.522, over 6969.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5807, over 4653.09 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (1/8) Epoch 5, batch 400, train_loss[loss=3.575, NarTop10Accuracy=0.6216, over 5151.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5825, over 5098.33 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (1/8) Epoch 5, batch 500, train_loss[loss=3.957, NarTop10Accuracy=0.5258, over 6045.00 frames. ], tot_loss[loss=3.732, NarTop10Accuracy=0.5779, over 5373.97 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,711 INFO [trainer.py:765] (1/8) Epoch 5, batch 600, train_loss[loss=3.891, NarTop10Accuracy=0.5376, over 5703.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5803, over 5658.33 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (1/8) Epoch 5, batch 700, train_loss[loss=3.362, NarTop10Accuracy=0.6607, over 4884.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5814, over 5730.89 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,499 INFO [trainer.py:765] (1/8) Epoch 5, batch 800, train_loss[loss=4.001, NarTop10Accuracy=0.5202, over 4941.00 frames. ], tot_loss[loss=3.705, NarTop10Accuracy=0.5839, over 5785.20 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,784 INFO [trainer.py:765] (1/8) Epoch 5, batch 900, train_loss[loss=3.622, NarTop10Accuracy=0.6063, over 6216.00 frames. ], tot_loss[loss=3.698, NarTop10Accuracy=0.5852, over 5819.06 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (1/8) Epoch 5, batch 1000, train_loss[loss=3.672, NarTop10Accuracy=0.6, over 6234.00 frames. ], tot_loss[loss=3.683, NarTop10Accuracy=0.5883, over 5913.73 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 15:24:09,572 INFO [trainer.py:765] (1/8) Epoch 5, batch 1100, train_loss[loss=3.529, NarTop10Accuracy=0.6244, over 6906.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5887, over 5933.90 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,529 INFO [trainer.py:765] (1/8) Epoch 5, batch 1200, train_loss[loss=3.449, NarTop10Accuracy=0.6391, over 7329.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.5893, over 5914.00 frames. 
], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,380 INFO [trainer.py:765] (1/8) Epoch 5, batch 1300, train_loss[loss=3.838, NarTop10Accuracy=0.5451, over 5118.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.592, over 6003.15 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (1/8) Epoch 5, batch 1400, train_loss[loss=3.901, NarTop10Accuracy=0.5419, over 6000.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5909, over 6018.90 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (1/8) Epoch 5, batch 1500, train_loss[loss=3.593, NarTop10Accuracy=0.6177, over 6606.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5912, over 5952.64 frames. ], batch size: 53, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (1/8) Epoch 5, batch 1600, train_loss[loss=3.494, NarTop10Accuracy=0.6325, over 7173.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5886, over 5929.80 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,603 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (1/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (1/8) Epoch 5, batch 1700, train_loss[loss=3.653, NarTop10Accuracy=0.5954, over 6618.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5921, over 5921.38 frames. ], batch size: 14, lr: 1.56e-02 +2024-08-06 15:27:55,652 INFO [trainer.py:765] (1/8) Epoch 5, batch 1800, train_loss[loss=3.762, NarTop10Accuracy=0.5703, over 7242.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5923, over 5967.37 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (1/8) Epoch 5, batch 1900, train_loss[loss=3.704, NarTop10Accuracy=0.5872, over 5916.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5911, over 6017.91 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (1/8) Epoch 5, batch 2000, train_loss[loss=3.626, NarTop10Accuracy=0.5978, over 5979.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5912, over 5990.51 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (1/8) Epoch 5, batch 2100, train_loss[loss=3.435, NarTop10Accuracy=0.6453, over 3843.00 frames. ], tot_loss[loss=3.687, NarTop10Accuracy=0.5873, over 5961.66 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (1/8) Epoch 5, batch 2200, train_loss[loss=4.104, NarTop10Accuracy=0.4974, over 7347.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5902, over 5995.22 frames. ], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (1/8) Epoch 5, batch 2300, train_loss[loss=3.461, NarTop10Accuracy=0.6269, over 5748.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5885, over 6015.65 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (1/8) Epoch 5, batch 2400, train_loss[loss=3.327, NarTop10Accuracy=0.6591, over 5127.00 frames. ], tot_loss[loss=3.654, NarTop10Accuracy=0.5942, over 5764.05 frames. 
], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,502 INFO [trainer.py:765] (1/8) Epoch 5, batch 2500, train_loss[loss=3.349, NarTop10Accuracy=0.654, over 5037.00 frames. ], tot_loss[loss=3.611, NarTop10Accuracy=0.6026, over 5453.35 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,397 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (1/8) Epoch 6, batch 100, train_loss[loss=3.505, NarTop10Accuracy=0.6322, over 7221.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5986, over 2359.27 frames. ], batch size: 32, lr: 1.42e-02 +2024-08-06 15:32:46,016 INFO [trainer.py:765] (1/8) Epoch 6, batch 200, train_loss[loss=4.019, NarTop10Accuracy=0.505, over 7098.00 frames. ], tot_loss[loss=3.616, NarTop10Accuracy=0.6016, over 3859.41 frames. ], batch size: 18, lr: 1.42e-02 +2024-08-06 15:33:21,243 INFO [trainer.py:765] (1/8) Epoch 6, batch 300, train_loss[loss=3.546, NarTop10Accuracy=0.6158, over 7278.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.6022, over 4660.06 frames. ], batch size: 23, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (1/8) Epoch 6, batch 400, train_loss[loss=3.453, NarTop10Accuracy=0.6373, over 5067.00 frames. ], tot_loss[loss=3.597, NarTop10Accuracy=0.6054, over 5096.56 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (1/8) Epoch 6, batch 500, train_loss[loss=3.309, NarTop10Accuracy=0.6671, over 6117.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.609, over 5385.10 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (1/8) Epoch 6, batch 600, train_loss[loss=3.309, NarTop10Accuracy=0.6715, over 6198.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6088, over 5672.91 frames. ], batch size: 10, lr: 1.40e-02 +2024-08-06 15:35:32,734 INFO [trainer.py:765] (1/8) Epoch 6, batch 700, train_loss[loss=3.355, NarTop10Accuracy=0.6594, over 4338.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6086, over 5721.21 frames. ], batch size: 5, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (1/8) Epoch 6, batch 800, train_loss[loss=3.623, NarTop10Accuracy=0.5956, over 5079.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.6064, over 5787.56 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (1/8) Epoch 6, batch 900, train_loss[loss=3.934, NarTop10Accuracy=0.5359, over 6339.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6086, over 5792.08 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (1/8) Epoch 6, batch 1000, train_loss[loss=3.364, NarTop10Accuracy=0.6451, over 6837.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.6058, over 5898.09 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (1/8) Epoch 6, batch 1100, train_loss[loss=3.419, NarTop10Accuracy=0.6495, over 6723.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6071, over 5932.45 frames. ], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,828 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (1/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. 
+2024-08-06 15:38:04,437 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:38:04,965 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (1/8) Epoch 6, batch 1200, train_loss[loss=3.422, NarTop10Accuracy=0.647, over 7536.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6108, over 5931.31 frames. ], batch size: 32, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (1/8) Epoch 6, batch 1300, train_loss[loss=3.474, NarTop10Accuracy=0.6334, over 5013.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.611, over 5989.57 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 15:39:44,069 INFO [trainer.py:765] (1/8) Epoch 6, batch 1400, train_loss[loss=3.372, NarTop10Accuracy=0.6546, over 6060.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6114, over 6008.00 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (1/8) Epoch 6, batch 1500, train_loss[loss=4.034, NarTop10Accuracy=0.5213, over 6066.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6126, over 5948.07 frames. ], batch size: 50, lr: 1.36e-02 +2024-08-06 15:40:43,105 INFO [trainer.py:765] (1/8) Epoch 6, batch 1600, train_loss[loss=3.456, NarTop10Accuracy=0.6325, over 6930.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6134, over 5917.39 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,788 INFO [trainer.py:765] (1/8) Epoch 6, batch 1700, train_loss[loss=3.526, NarTop10Accuracy=0.6194, over 6159.00 frames. ], tot_loss[loss=3.55, NarTop10Accuracy=0.6152, over 5917.20 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,316 INFO [trainer.py:765] (1/8) Epoch 6, batch 1800, train_loss[loss=3.454, NarTop10Accuracy=0.6394, over 7386.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6132, over 5996.07 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (1/8) Epoch 6, batch 1900, train_loss[loss=3.804, NarTop10Accuracy=0.5647, over 6288.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6087, over 6033.13 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (1/8) Epoch 6, batch 2000, train_loss[loss=3.504, NarTop10Accuracy=0.62, over 6582.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6097, over 6026.23 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,669 INFO [trainer.py:765] (1/8) Epoch 6, batch 2100, train_loss[loss=3.166, NarTop10Accuracy=0.6929, over 3990.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6133, over 5994.33 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (1/8) Epoch 6, batch 2200, train_loss[loss=3.782, NarTop10Accuracy=0.566, over 7395.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6122, over 6025.15 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,105 INFO [trainer.py:765] (1/8) Epoch 6, batch 2300, train_loss[loss=3.327, NarTop10Accuracy=0.666, over 5712.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6126, over 6043.14 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (1/8) Epoch 6, batch 2400, train_loss[loss=3.385, NarTop10Accuracy=0.6523, over 5031.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6183, over 5788.15 frames. 
], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (1/8) Epoch 6, batch 2500, train_loss[loss=3.496, NarTop10Accuracy=0.6312, over 5154.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.6211, over 5472.80 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,568 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:45:58,042 INFO [trainer.py:765] (1/8) Epoch 7, batch 100, train_loss[loss=3.382, NarTop10Accuracy=0.6544, over 7578.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6162, over 2370.00 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,613 INFO [trainer.py:765] (1/8) Epoch 7, batch 200, train_loss[loss=3.467, NarTop10Accuracy=0.6207, over 6849.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6201, over 3857.92 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (1/8) Epoch 7, batch 300, train_loss[loss=3.847, NarTop10Accuracy=0.5556, over 7293.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6179, over 4649.38 frames. ], batch size: 23, lr: 1.23e-02 +2024-08-06 15:47:34,494 INFO [trainer.py:765] (1/8) Epoch 7, batch 400, train_loss[loss=3.661, NarTop10Accuracy=0.5924, over 5658.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6188, over 5107.51 frames. ], batch size: 8, lr: 1.23e-02 +2024-08-06 15:48:13,729 INFO [trainer.py:765] (1/8) Epoch 7, batch 500, train_loss[loss=3.611, NarTop10Accuracy=0.6036, over 6024.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.621, over 5375.72 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,368 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:48:35,079 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,721 INFO [trainer.py:765] (1/8) Epoch 7, batch 600, train_loss[loss=3.204, NarTop10Accuracy=0.6898, over 5748.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6209, over 5634.24 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,911 INFO [trainer.py:765] (1/8) Epoch 7, batch 700, train_loss[loss=3.802, NarTop10Accuracy=0.5594, over 5118.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.622, over 5700.01 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,380 INFO [trainer.py:765] (1/8) Epoch 7, batch 800, train_loss[loss=3.101, NarTop10Accuracy=0.7091, over 5028.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6253, over 5756.47 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,548 INFO [trainer.py:765] (1/8) Epoch 7, batch 900, train_loss[loss=3.269, NarTop10Accuracy=0.6767, over 6636.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6264, over 5791.30 frames. ], batch size: 14, lr: 1.21e-02 +2024-08-06 15:51:07,154 INFO [trainer.py:765] (1/8) Epoch 7, batch 1000, train_loss[loss=3.312, NarTop10Accuracy=0.661, over 6072.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6274, over 5902.62 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 15:51:51,758 INFO [trainer.py:765] (1/8) Epoch 7, batch 1100, train_loss[loss=3.311, NarTop10Accuracy=0.6652, over 6777.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6272, over 5944.99 frames. 
], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,699 INFO [trainer.py:765] (1/8) Epoch 7, batch 1200, train_loss[loss=3.403, NarTop10Accuracy=0.6526, over 7455.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6272, over 5939.14 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,007 INFO [trainer.py:765] (1/8) Epoch 7, batch 1300, train_loss[loss=3.448, NarTop10Accuracy=0.6362, over 5046.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6262, over 5998.13 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,841 INFO [trainer.py:765] (1/8) Epoch 7, batch 1400, train_loss[loss=3.312, NarTop10Accuracy=0.6672, over 6219.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.626, over 6031.75 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,599 INFO [trainer.py:765] (1/8) Epoch 7, batch 1500, train_loss[loss=3.615, NarTop10Accuracy=0.593, over 5625.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6305, over 5952.57 frames. ], batch size: 51, lr: 1.19e-02 +2024-08-06 15:54:32,384 INFO [trainer.py:765] (1/8) Epoch 7, batch 1600, train_loss[loss=3.626, NarTop10Accuracy=0.5926, over 7023.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.63, over 5930.83 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,054 INFO [trainer.py:765] (1/8) Epoch 7, batch 1700, train_loss[loss=3.524, NarTop10Accuracy=0.6189, over 6063.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.626, over 5916.40 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 15:55:25,512 INFO [trainer.py:765] (1/8) Epoch 7, batch 1800, train_loss[loss=3.911, NarTop10Accuracy=0.5472, over 6942.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6263, over 5970.30 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,082 INFO [trainer.py:765] (1/8) Epoch 7, batch 1900, train_loss[loss=3.414, NarTop10Accuracy=0.6378, over 6087.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6232, over 6009.46 frames. ], batch size: 50, lr: 1.18e-02 +2024-08-06 15:56:17,590 INFO [trainer.py:765] (1/8) Epoch 7, batch 2000, train_loss[loss=3.775, NarTop10Accuracy=0.569, over 6579.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6234, over 5991.80 frames. ], batch size: 51, lr: 1.17e-02 +2024-08-06 15:56:42,856 INFO [trainer.py:765] (1/8) Epoch 7, batch 2100, train_loss[loss=3.93, NarTop10Accuracy=0.5233, over 4005.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6264, over 5969.26 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,078 INFO [trainer.py:765] (1/8) Epoch 7, batch 2200, train_loss[loss=3.475, NarTop10Accuracy=0.6246, over 7152.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6229, over 6005.32 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,178 INFO [trainer.py:765] (1/8) Epoch 7, batch 2300, train_loss[loss=3.14, NarTop10Accuracy=0.6927, over 5637.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6237, over 6018.05 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,618 INFO [trainer.py:765] (1/8) Epoch 7, batch 2400, train_loss[loss=3.174, NarTop10Accuracy=0.6874, over 5166.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.627, over 5756.67 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,088 INFO [trainer.py:765] (1/8) Epoch 7, batch 2500, train_loss[loss=3.856, NarTop10Accuracy=0.5531, over 5229.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6317, over 5455.22 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,565 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 15:58:40,221 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:49,133 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:59:52,877 INFO [trainer.py:765] (1/8) Epoch 8, batch 100, train_loss[loss=3.632, NarTop10Accuracy=0.5922, over 7077.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6334, over 2376.28 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,880 INFO [trainer.py:765] (1/8) Epoch 8, batch 200, train_loss[loss=3.313, NarTop10Accuracy=0.6651, over 6924.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6297, over 3853.53 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,562 INFO [trainer.py:765] (1/8) Epoch 8, batch 300, train_loss[loss=3.303, NarTop10Accuracy=0.67, over 7332.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6314, over 4655.77 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,759 INFO [trainer.py:765] (1/8) Epoch 8, batch 400, train_loss[loss=3.715, NarTop10Accuracy=0.5725, over 5028.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6315, over 5099.67 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,065 INFO [trainer.py:765] (1/8) Epoch 8, batch 500, train_loss[loss=3.865, NarTop10Accuracy=0.5492, over 6093.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6342, over 5372.03 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,835 INFO [trainer.py:765] (1/8) Epoch 8, batch 600, train_loss[loss=3.158, NarTop10Accuracy=0.7052, over 5709.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6315, over 5640.03 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,499 INFO [trainer.py:765] (1/8) Epoch 8, batch 700, train_loss[loss=3.691, NarTop10Accuracy=0.5815, over 5190.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6304, over 5703.73 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,083 INFO [trainer.py:765] (1/8) Epoch 8, batch 800, train_loss[loss=3.372, NarTop10Accuracy=0.6531, over 5049.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6318, over 5772.34 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:04:27,587 INFO [trainer.py:765] (1/8) Epoch 8, batch 900, train_loss[loss=3.228, NarTop10Accuracy=0.6794, over 6150.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6356, over 5792.09 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:04:57,465 INFO [trainer.py:765] (1/8) Epoch 8, batch 1000, train_loss[loss=3.62, NarTop10Accuracy=0.5982, over 6234.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6371, over 5905.49 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:05:37,293 INFO [trainer.py:765] (1/8) Epoch 8, batch 1100, train_loss[loss=3.731, NarTop10Accuracy=0.5821, over 6822.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.638, over 5924.46 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,860 INFO [trainer.py:765] (1/8) Epoch 8, batch 1200, train_loss[loss=3.448, NarTop10Accuracy=0.6366, over 7335.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6354, over 5936.03 frames. 
], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,186 INFO [trainer.py:765] (1/8) Epoch 8, batch 1300, train_loss[loss=3.417, NarTop10Accuracy=0.6429, over 5172.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6369, over 5999.03 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 16:07:24,234 INFO [trainer.py:765] (1/8) Epoch 8, batch 1400, train_loss[loss=3.425, NarTop10Accuracy=0.6314, over 5994.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6357, over 6011.16 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,168 INFO [trainer.py:765] (1/8) Epoch 8, batch 1500, train_loss[loss=3.398, NarTop10Accuracy=0.6525, over 6231.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6402, over 5954.22 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (1/8) Epoch 8, batch 1600, train_loss[loss=3.24, NarTop10Accuracy=0.6776, over 6957.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6411, over 5930.53 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,617 INFO [trainer.py:765] (1/8) Epoch 8, batch 1700, train_loss[loss=3.247, NarTop10Accuracy=0.6727, over 6282.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6407, over 5907.85 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 16:09:13,105 INFO [trainer.py:765] (1/8) Epoch 8, batch 1800, train_loss[loss=3.252, NarTop10Accuracy=0.677, over 7209.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6427, over 5962.86 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,635 INFO [trainer.py:765] (1/8) Epoch 8, batch 1900, train_loss[loss=3.681, NarTop10Accuracy=0.5864, over 6495.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6436, over 6014.68 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:09:56,939 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (1/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 16:10:05,469 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,203 INFO [trainer.py:765] (1/8) Epoch 8, batch 2000, train_loss[loss=3.922, NarTop10Accuracy=0.5332, over 6243.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6421, over 5986.73 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,513 INFO [trainer.py:765] (1/8) Epoch 8, batch 2100, train_loss[loss=3.208, NarTop10Accuracy=0.6827, over 4014.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6443, over 5948.80 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,746 INFO [trainer.py:765] (1/8) Epoch 8, batch 2200, train_loss[loss=3.529, NarTop10Accuracy=0.6189, over 7368.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6413, over 6015.35 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,904 INFO [trainer.py:765] (1/8) Epoch 8, batch 2300, train_loss[loss=3.691, NarTop10Accuracy=0.5771, over 5745.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6376, over 6030.40 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,092 INFO [trainer.py:765] (1/8) Epoch 8, batch 2400, train_loss[loss=3.531, NarTop10Accuracy=0.6207, over 5628.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6404, over 5780.38 frames. 
], batch size: 8, lr: 1.03e-02 +2024-08-06 16:12:16,444 INFO [trainer.py:765] (1/8) Epoch 8, batch 2500, train_loss[loss=3.387, NarTop10Accuracy=0.6428, over 5010.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6421, over 5481.59 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,390 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 16:13:37,514 INFO [trainer.py:765] (1/8) Epoch 9, batch 100, train_loss[loss=3.164, NarTop10Accuracy=0.6948, over 7518.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.651, over 2353.58 frames. ], batch size: 32, lr: 9.72e-03 +2024-08-06 16:14:14,440 INFO [trainer.py:765] (1/8) Epoch 9, batch 200, train_loss[loss=3.681, NarTop10Accuracy=0.584, over 6714.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6524, over 3850.84 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,507 INFO [trainer.py:765] (1/8) Epoch 9, batch 300, train_loss[loss=3.388, NarTop10Accuracy=0.65, over 7182.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6496, over 4627.30 frames. ], batch size: 23, lr: 9.68e-03 +2024-08-06 16:15:14,914 INFO [trainer.py:765] (1/8) Epoch 9, batch 400, train_loss[loss=3.133, NarTop10Accuracy=0.7052, over 5118.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6528, over 5084.01 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,336 INFO [trainer.py:765] (1/8) Epoch 9, batch 500, train_loss[loss=3.093, NarTop10Accuracy=0.7045, over 6126.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.655, over 5356.33 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,972 INFO [trainer.py:765] (1/8) Epoch 9, batch 600, train_loss[loss=3.498, NarTop10Accuracy=0.6297, over 5520.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6573, over 5635.29 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,146 INFO [trainer.py:765] (1/8) Epoch 9, batch 700, train_loss[loss=3.212, NarTop10Accuracy=0.6791, over 4962.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6548, over 5701.42 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,051 INFO [trainer.py:765] (1/8) Epoch 9, batch 800, train_loss[loss=3.171, NarTop10Accuracy=0.698, over 4920.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6479, over 5759.41 frames. ], batch size: 6, lr: 9.57e-03 +2024-08-06 16:18:07,815 INFO [trainer.py:765] (1/8) Epoch 9, batch 900, train_loss[loss=3.27, NarTop10Accuracy=0.6751, over 6672.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6491, over 5789.61 frames. ], batch size: 14, lr: 9.55e-03 +2024-08-06 16:18:39,344 INFO [trainer.py:765] (1/8) Epoch 9, batch 1000, train_loss[loss=3.136, NarTop10Accuracy=0.6926, over 6231.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6456, over 5889.93 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,382 INFO [trainer.py:765] (1/8) Epoch 9, batch 1100, train_loss[loss=3.52, NarTop10Accuracy=0.6206, over 6855.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6462, over 5930.52 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,877 INFO [trainer.py:765] (1/8) Epoch 9, batch 1200, train_loss[loss=3.788, NarTop10Accuracy=0.5654, over 7419.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6444, over 5935.68 frames. ], batch size: 32, lr: 9.48e-03 +2024-08-06 16:20:24,906 INFO [trainer.py:765] (1/8) Epoch 9, batch 1300, train_loss[loss=3.205, NarTop10Accuracy=0.674, over 5007.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6452, over 6002.64 frames. 
], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,579 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (1/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,691 INFO [trainer.py:765] (1/8) Epoch 9, batch 1400, train_loss[loss=3.557, NarTop10Accuracy=0.6095, over 6126.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.644, over 6019.82 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,896 INFO [trainer.py:765] (1/8) Epoch 9, batch 1500, train_loss[loss=3.413, NarTop10Accuracy=0.6449, over 6402.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6501, over 5958.46 frames. ], batch size: 51, lr: 9.42e-03 +2024-08-06 16:22:06,721 INFO [trainer.py:765] (1/8) Epoch 9, batch 1600, train_loss[loss=3.485, NarTop10Accuracy=0.6144, over 7287.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6509, over 5945.34 frames. ], batch size: 23, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (1/8) Epoch 9, batch 1700, train_loss[loss=3.366, NarTop10Accuracy=0.6479, over 6660.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6474, over 5933.77 frames. ], batch size: 14, lr: 9.38e-03 +2024-08-06 16:23:00,063 INFO [trainer.py:765] (1/8) Epoch 9, batch 1800, train_loss[loss=3.171, NarTop10Accuracy=0.697, over 7107.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6503, over 5982.72 frames. ], batch size: 22, lr: 9.36e-03 +2024-08-06 16:23:26,782 INFO [trainer.py:765] (1/8) Epoch 9, batch 1900, train_loss[loss=3.329, NarTop10Accuracy=0.6645, over 5994.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6484, over 6022.73 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,485 INFO [trainer.py:765] (1/8) Epoch 9, batch 2000, train_loss[loss=3.886, NarTop10Accuracy=0.5414, over 5739.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6496, over 5972.24 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,962 INFO [trainer.py:765] (1/8) Epoch 9, batch 2100, train_loss[loss=3.105, NarTop10Accuracy=0.7065, over 4014.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6487, over 5956.90 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (1/8) Epoch 9, batch 2200, train_loss[loss=3.635, NarTop10Accuracy=0.5943, over 7092.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6476, over 5994.87 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,721 INFO [trainer.py:765] (1/8) Epoch 9, batch 2300, train_loss[loss=3.385, NarTop10Accuracy=0.6484, over 5727.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6447, over 6032.79 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,164 INFO [trainer.py:765] (1/8) Epoch 9, batch 2400, train_loss[loss=3.318, NarTop10Accuracy=0.659, over 5139.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6455, over 5786.55 frames. ], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,768 INFO [trainer.py:765] (1/8) Epoch 9, batch 2500, train_loss[loss=3.156, NarTop10Accuracy=0.6969, over 5250.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6537, over 5483.67 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,434 INFO [trainer.py:650] (1/8) Reaches end of dataloader. 
+2024-08-06 16:27:19,583 INFO [trainer.py:765] (1/8) Epoch 10, batch 100, train_loss[loss=3.325, NarTop10Accuracy=0.666, over 7575.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6492, over 2375.34 frames. ], batch size: 32, lr: 8.76e-03 +2024-08-06 16:27:52,627 INFO [trainer.py:765] (1/8) Epoch 10, batch 200, train_loss[loss=2.999, NarTop10Accuracy=0.7327, over 6894.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6543, over 3855.59 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,056 INFO [trainer.py:765] (1/8) Epoch 10, batch 300, train_loss[loss=3.142, NarTop10Accuracy=0.7038, over 6864.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6546, over 4655.94 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,199 INFO [trainer.py:765] (1/8) Epoch 10, batch 400, train_loss[loss=3.279, NarTop10Accuracy=0.6744, over 5055.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6562, over 5103.85 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,217 INFO [trainer.py:765] (1/8) Epoch 10, batch 500, train_loss[loss=3.075, NarTop10Accuracy=0.7121, over 6126.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6578, over 5371.89 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,764 INFO [trainer.py:765] (1/8) Epoch 10, batch 600, train_loss[loss=3.541, NarTop10Accuracy=0.6196, over 5640.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6574, over 5640.01 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,264 INFO [trainer.py:765] (1/8) Epoch 10, batch 700, train_loss[loss=3.307, NarTop10Accuracy=0.6598, over 4326.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6557, over 5718.98 frames. ], batch size: 5, lr: 8.65e-03 +2024-08-06 16:31:09,842 INFO [trainer.py:765] (1/8) Epoch 10, batch 800, train_loss[loss=3.529, NarTop10Accuracy=0.6213, over 5055.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6537, over 5782.66 frames. ], batch size: 6, lr: 8.64e-03 +2024-08-06 16:31:16,256 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (1/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (1/8) Epoch 10, batch 900, train_loss[loss=3.11, NarTop10Accuracy=0.7113, over 6378.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6598, over 5785.46 frames. ], batch size: 13, lr: 8.62e-03 +2024-08-06 16:32:28,588 INFO [trainer.py:765] (1/8) Epoch 10, batch 1000, train_loss[loss=3.13, NarTop10Accuracy=0.7077, over 6141.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6585, over 5900.56 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,375 INFO [trainer.py:765] (1/8) Epoch 10, batch 1100, train_loss[loss=3.159, NarTop10Accuracy=0.7018, over 6798.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6573, over 5922.92 frames. ], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (1/8) Epoch 10, batch 1200, train_loss[loss=3.339, NarTop10Accuracy=0.6644, over 7458.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6574, over 5925.63 frames. 
], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,169 INFO [trainer.py:765] (1/8) Epoch 10, batch 1300, train_loss[loss=3.176, NarTop10Accuracy=0.681, over 5178.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6583, over 5999.31 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,200 INFO [trainer.py:765] (1/8) Epoch 10, batch 1400, train_loss[loss=3.4, NarTop10Accuracy=0.6499, over 6108.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6522, over 6022.66 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,158 INFO [trainer.py:765] (1/8) Epoch 10, batch 1500, train_loss[loss=3.624, NarTop10Accuracy=0.6046, over 6207.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.658, over 5957.40 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (1/8) Epoch 10, batch 1600, train_loss[loss=3.598, NarTop10Accuracy=0.6074, over 7212.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6601, over 5936.33 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,975 INFO [trainer.py:765] (1/8) Epoch 10, batch 1700, train_loss[loss=3.471, NarTop10Accuracy=0.6369, over 6342.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6571, over 5926.93 frames. ], batch size: 13, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (1/8) Epoch 10, batch 1800, train_loss[loss=3.256, NarTop10Accuracy=0.6767, over 7206.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6596, over 5990.88 frames. ], batch size: 23, lr: 8.47e-03 +2024-08-06 16:37:10,289 INFO [trainer.py:765] (1/8) Epoch 10, batch 1900, train_loss[loss=3.277, NarTop10Accuracy=0.6712, over 6051.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6601, over 6037.49 frames. ], batch size: 51, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (1/8) Epoch 10, batch 2000, train_loss[loss=3.254, NarTop10Accuracy=0.6848, over 6348.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6602, over 6013.65 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (1/8) Epoch 10, batch 2100, train_loss[loss=3.328, NarTop10Accuracy=0.6487, over 4851.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6566, over 5986.25 frames. ], batch size: 5, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (1/8) Epoch 10, batch 2200, train_loss[loss=3.793, NarTop10Accuracy=0.5644, over 7050.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6558, over 6010.45 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (1/8) Epoch 10, batch 2300, train_loss[loss=3.205, NarTop10Accuracy=0.6916, over 5628.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6544, over 6014.34 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,005 INFO [trainer.py:765] (1/8) Epoch 10, batch 2400, train_loss[loss=3.289, NarTop10Accuracy=0.6684, over 5211.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6605, over 5777.78 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (1/8) Epoch 10, batch 2500, train_loss[loss=3.5, NarTop10Accuracy=0.6214, over 5151.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6658, over 5465.18 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,839 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 16:41:06,234 INFO [trainer.py:765] (1/8) Epoch 11, batch 100, train_loss[loss=3.593, NarTop10Accuracy=0.6093, over 7272.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6553, over 2361.87 frames. 
], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,020 INFO [trainer.py:765] (1/8) Epoch 11, batch 200, train_loss[loss=3.649, NarTop10Accuracy=0.5854, over 6897.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6586, over 3853.59 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,189 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,975 INFO [trainer.py:765] (1/8) Epoch 11, batch 300, train_loss[loss=3.073, NarTop10Accuracy=0.7081, over 7104.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6646, over 4655.64 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,154 INFO [trainer.py:765] (1/8) Epoch 11, batch 400, train_loss[loss=3.346, NarTop10Accuracy=0.6546, over 5226.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6657, over 5117.73 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,719 INFO [trainer.py:765] (1/8) Epoch 11, batch 500, train_loss[loss=3.06, NarTop10Accuracy=0.7167, over 6153.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6682, over 5389.60 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,242 INFO [trainer.py:765] (1/8) Epoch 11, batch 600, train_loss[loss=3.625, NarTop10Accuracy=0.5978, over 5685.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6662, over 5647.37 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (1/8) Epoch 11, batch 700, train_loss[loss=3.56, NarTop10Accuracy=0.6041, over 4980.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6662, over 5708.29 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,468 INFO [trainer.py:765] (1/8) Epoch 11, batch 800, train_loss[loss=3.016, NarTop10Accuracy=0.7294, over 4986.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6641, over 5759.23 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,457 INFO [trainer.py:765] (1/8) Epoch 11, batch 900, train_loss[loss=3.531, NarTop10Accuracy=0.6127, over 6351.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6644, over 5784.54 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,311 INFO [trainer.py:765] (1/8) Epoch 11, batch 1000, train_loss[loss=3.318, NarTop10Accuracy=0.6599, over 6270.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6644, over 5882.70 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 16:46:53,456 INFO [trainer.py:765] (1/8) Epoch 11, batch 1100, train_loss[loss=3.075, NarTop10Accuracy=0.7152, over 6921.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6661, over 5889.58 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (1/8) Epoch 11, batch 1200, train_loss[loss=3.43, NarTop10Accuracy=0.6398, over 7323.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6642, over 5918.35 frames. ], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,482 INFO [trainer.py:765] (1/8) Epoch 11, batch 1300, train_loss[loss=3.101, NarTop10Accuracy=0.7111, over 5127.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6631, over 5983.10 frames. 
], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,356 INFO [trainer.py:765] (1/8) Epoch 11, batch 1400, train_loss[loss=3.429, NarTop10Accuracy=0.6382, over 6231.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6595, over 6003.59 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,344 INFO [trainer.py:765] (1/8) Epoch 11, batch 1500, train_loss[loss=3.213, NarTop10Accuracy=0.6838, over 5784.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6605, over 5943.35 frames. ], batch size: 50, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (1/8) Epoch 11, batch 1600, train_loss[loss=3.306, NarTop10Accuracy=0.6588, over 7233.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6642, over 5906.35 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,792 INFO [trainer.py:765] (1/8) Epoch 11, batch 1700, train_loss[loss=3.376, NarTop10Accuracy=0.6382, over 6774.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6645, over 5914.30 frames. ], batch size: 14, lr: 7.74e-03 +2024-08-06 16:50:30,352 INFO [trainer.py:765] (1/8) Epoch 11, batch 1800, train_loss[loss=3.317, NarTop10Accuracy=0.6616, over 6888.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6625, over 5978.58 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (1/8) Epoch 11, batch 1900, train_loss[loss=3.838, NarTop10Accuracy=0.5612, over 5832.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6608, over 6012.19 frames. ], batch size: 50, lr: 7.71e-03 +2024-08-06 16:51:22,404 INFO [trainer.py:765] (1/8) Epoch 11, batch 2000, train_loss[loss=3.944, NarTop10Accuracy=0.5317, over 5844.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6621, over 5994.67 frames. ], batch size: 50, lr: 7.70e-03 +2024-08-06 16:51:47,794 INFO [trainer.py:765] (1/8) Epoch 11, batch 2100, train_loss[loss=2.96, NarTop10Accuracy=0.7239, over 4866.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6639, over 5979.62 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (1/8) Epoch 11, batch 2200, train_loss[loss=3.28, NarTop10Accuracy=0.6728, over 7473.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6646, over 6017.56 frames. ], batch size: 32, lr: 7.67e-03 +2024-08-06 16:52:23,898 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,080 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (1/8) Epoch 11, batch 2300, train_loss[loss=3.168, NarTop10Accuracy=0.6956, over 5694.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6635, over 6008.73 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (1/8) Epoch 11, batch 2400, train_loss[loss=3.484, NarTop10Accuracy=0.6263, over 5253.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6651, over 5764.12 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,371 INFO [trainer.py:765] (1/8) Epoch 11, batch 2500, train_loss[loss=3.653, NarTop10Accuracy=0.5834, over 5046.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6663, over 5480.73 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,404 INFO [trainer.py:650] (1/8) Reaches end of dataloader. 
+2024-08-06 16:54:58,524 INFO [trainer.py:765] (1/8) Epoch 12, batch 100, train_loss[loss=3.63, NarTop10Accuracy=0.5932, over 7413.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6662, over 2371.92 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,431 INFO [trainer.py:765] (1/8) Epoch 12, batch 200, train_loss[loss=3.195, NarTop10Accuracy=0.6852, over 6975.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6713, over 3864.48 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,095 INFO [trainer.py:765] (1/8) Epoch 12, batch 300, train_loss[loss=3.116, NarTop10Accuracy=0.709, over 6963.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6764, over 4655.60 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,425 INFO [trainer.py:765] (1/8) Epoch 12, batch 400, train_loss[loss=3.14, NarTop10Accuracy=0.7092, over 5103.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6741, over 5095.26 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,502 INFO [trainer.py:765] (1/8) Epoch 12, batch 500, train_loss[loss=3.563, NarTop10Accuracy=0.6086, over 6048.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6722, over 5374.91 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,482 INFO [trainer.py:765] (1/8) Epoch 12, batch 600, train_loss[loss=2.97, NarTop10Accuracy=0.7307, over 5694.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6712, over 5649.23 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,004 INFO [trainer.py:765] (1/8) Epoch 12, batch 700, train_loss[loss=3.541, NarTop10Accuracy=0.6147, over 5196.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6683, over 5716.45 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 16:58:53,468 INFO [trainer.py:765] (1/8) Epoch 12, batch 800, train_loss[loss=3.365, NarTop10Accuracy=0.6537, over 4308.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6676, over 5779.85 frames. ], batch size: 5, lr: 7.21e-03 +2024-08-06 16:59:27,205 INFO [trainer.py:765] (1/8) Epoch 12, batch 900, train_loss[loss=3.137, NarTop10Accuracy=0.6972, over 6660.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6718, over 5792.80 frames. ], batch size: 14, lr: 7.20e-03 +2024-08-06 17:00:01,573 INFO [trainer.py:765] (1/8) Epoch 12, batch 1000, train_loss[loss=2.988, NarTop10Accuracy=0.7309, over 6204.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6684, over 5894.14 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 17:00:39,188 INFO [trainer.py:765] (1/8) Epoch 12, batch 1100, train_loss[loss=3.632, NarTop10Accuracy=0.5964, over 6954.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6657, over 5927.51 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,963 INFO [trainer.py:765] (1/8) Epoch 12, batch 1200, train_loss[loss=3.162, NarTop10Accuracy=0.7001, over 7317.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6737, over 5933.76 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,107 INFO [trainer.py:765] (1/8) Epoch 12, batch 1300, train_loss[loss=3.202, NarTop10Accuracy=0.6923, over 4281.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6701, over 5996.81 frames. ], batch size: 5, lr: 7.15e-03 +2024-08-06 17:02:22,322 INFO [trainer.py:765] (1/8) Epoch 12, batch 1400, train_loss[loss=3.479, NarTop10Accuracy=0.6256, over 6177.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6674, over 6015.08 frames. 
], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,876 INFO [trainer.py:765] (1/8) Epoch 12, batch 1500, train_loss[loss=3.411, NarTop10Accuracy=0.6422, over 5982.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6726, over 5967.32 frames. ], batch size: 50, lr: 7.13e-03 +2024-08-06 17:03:20,690 INFO [trainer.py:765] (1/8) Epoch 12, batch 1600, train_loss[loss=3.23, NarTop10Accuracy=0.6801, over 7164.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.67, over 5926.68 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,296 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (1/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,475 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,604 INFO [trainer.py:765] (1/8) Epoch 12, batch 1700, train_loss[loss=3.251, NarTop10Accuracy=0.6712, over 6678.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6689, over 5909.22 frames. ], batch size: 14, lr: 7.11e-03 +2024-08-06 17:04:22,121 INFO [trainer.py:765] (1/8) Epoch 12, batch 1800, train_loss[loss=3.664, NarTop10Accuracy=0.5906, over 7416.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6684, over 5979.99 frames. ], batch size: 23, lr: 7.10e-03 +2024-08-06 17:04:48,591 INFO [trainer.py:765] (1/8) Epoch 12, batch 1900, train_loss[loss=3.258, NarTop10Accuracy=0.6769, over 6306.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6706, over 6030.31 frames. ], batch size: 52, lr: 7.08e-03 +2024-08-06 17:05:14,198 INFO [trainer.py:765] (1/8) Epoch 12, batch 2000, train_loss[loss=3.543, NarTop10Accuracy=0.6148, over 5364.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6728, over 6002.12 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 17:05:39,468 INFO [trainer.py:765] (1/8) Epoch 12, batch 2100, train_loss[loss=3.386, NarTop10Accuracy=0.652, over 3900.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6717, over 5968.27 frames. ], batch size: 4, lr: 7.06e-03 +2024-08-06 17:06:04,691 INFO [trainer.py:765] (1/8) Epoch 12, batch 2200, train_loss[loss=3.558, NarTop10Accuracy=0.6142, over 7203.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6679, over 5991.19 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,847 INFO [trainer.py:765] (1/8) Epoch 12, batch 2300, train_loss[loss=3.496, NarTop10Accuracy=0.6185, over 5631.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6688, over 6010.09 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,200 INFO [trainer.py:765] (1/8) Epoch 12, batch 2400, train_loss[loss=3.33, NarTop10Accuracy=0.6618, over 5250.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6711, over 5782.81 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,646 INFO [trainer.py:765] (1/8) Epoch 12, batch 2500, train_loss[loss=3.281, NarTop10Accuracy=0.6653, over 5106.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6733, over 5465.81 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,701 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 17:08:40,078 INFO [trainer.py:765] (1/8) Epoch 13, batch 100, train_loss[loss=3.072, NarTop10Accuracy=0.7097, over 7296.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6676, over 2364.01 frames. 
], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,119 INFO [trainer.py:765] (1/8) Epoch 13, batch 200, train_loss[loss=3.01, NarTop10Accuracy=0.7333, over 6741.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6686, over 3855.06 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,275 INFO [trainer.py:765] (1/8) Epoch 13, batch 300, train_loss[loss=3.569, NarTop10Accuracy=0.6014, over 7038.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6724, over 4649.41 frames. ], batch size: 22, lr: 6.71e-03 +2024-08-06 17:10:19,162 INFO [trainer.py:765] (1/8) Epoch 13, batch 400, train_loss[loss=2.977, NarTop10Accuracy=0.7303, over 5169.00 frames. ], tot_loss[loss=3.244, NarTop10Accuracy=0.6771, over 5120.46 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,334 INFO [trainer.py:765] (1/8) Epoch 13, batch 500, train_loss[loss=3.168, NarTop10Accuracy=0.6849, over 6069.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6788, over 5401.69 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,243 INFO [trainer.py:765] (1/8) Epoch 13, batch 600, train_loss[loss=3.01, NarTop10Accuracy=0.7236, over 5718.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6806, over 5664.18 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (1/8) Epoch 13, batch 700, train_loss[loss=3.095, NarTop10Accuracy=0.7042, over 5211.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6788, over 5727.10 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,441 INFO [trainer.py:765] (1/8) Epoch 13, batch 800, train_loss[loss=2.972, NarTop10Accuracy=0.7368, over 5127.00 frames. ], tot_loss[loss=3.247, NarTop10Accuracy=0.676, over 5793.48 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 17:13:10,030 INFO [trainer.py:765] (1/8) Epoch 13, batch 900, train_loss[loss=3.199, NarTop10Accuracy=0.6902, over 6528.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6783, over 5807.22 frames. ], batch size: 14, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (1/8) Epoch 13, batch 1000, train_loss[loss=3.55, NarTop10Accuracy=0.6111, over 6672.00 frames. ], tot_loss[loss=3.247, NarTop10Accuracy=0.6765, over 5896.95 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,536 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (1/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:14:24,471 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,696 INFO [trainer.py:765] (1/8) Epoch 13, batch 1100, train_loss[loss=3.439, NarTop10Accuracy=0.6359, over 6711.00 frames. ], tot_loss[loss=3.255, NarTop10Accuracy=0.6749, over 5933.24 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,474 INFO [trainer.py:765] (1/8) Epoch 13, batch 1200, train_loss[loss=3.397, NarTop10Accuracy=0.6512, over 7206.00 frames. ], tot_loss[loss=3.255, NarTop10Accuracy=0.6748, over 5938.95 frames. ], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,513 INFO [trainer.py:765] (1/8) Epoch 13, batch 1300, train_loss[loss=2.753, NarTop10Accuracy=0.768, over 4980.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6749, over 5997.70 frames. 
], batch size: 6, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (1/8) Epoch 13, batch 1400, train_loss[loss=3.156, NarTop10Accuracy=0.6866, over 6180.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6739, over 6024.07 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,787 INFO [trainer.py:765] (1/8) Epoch 13, batch 1500, train_loss[loss=3.53, NarTop10Accuracy=0.617, over 5760.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6749, over 5948.28 frames. ], batch size: 50, lr: 6.59e-03 +2024-08-06 17:17:07,602 INFO [trainer.py:765] (1/8) Epoch 13, batch 1600, train_loss[loss=2.942, NarTop10Accuracy=0.7379, over 7164.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6736, over 5930.97 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,258 INFO [trainer.py:765] (1/8) Epoch 13, batch 1700, train_loss[loss=3.214, NarTop10Accuracy=0.6813, over 6285.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6739, over 5917.87 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,761 INFO [trainer.py:765] (1/8) Epoch 13, batch 1800, train_loss[loss=3.084, NarTop10Accuracy=0.7037, over 7131.00 frames. ], tot_loss[loss=3.249, NarTop10Accuracy=0.6759, over 5987.25 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,243 INFO [trainer.py:765] (1/8) Epoch 13, batch 1900, train_loss[loss=3.463, NarTop10Accuracy=0.6329, over 5838.00 frames. ], tot_loss[loss=3.251, NarTop10Accuracy=0.6754, over 6031.57 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,776 INFO [trainer.py:765] (1/8) Epoch 13, batch 2000, train_loss[loss=3.533, NarTop10Accuracy=0.6199, over 6048.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6782, over 5994.36 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (1/8) Epoch 13, batch 2100, train_loss[loss=2.944, NarTop10Accuracy=0.7397, over 4710.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6779, over 5966.51 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,411 INFO [trainer.py:765] (1/8) Epoch 13, batch 2200, train_loss[loss=3.434, NarTop10Accuracy=0.638, over 7269.00 frames. ], tot_loss[loss=3.247, NarTop10Accuracy=0.6766, over 5981.88 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,542 INFO [trainer.py:765] (1/8) Epoch 13, batch 2300, train_loss[loss=3.66, NarTop10Accuracy=0.5997, over 5745.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6731, over 6012.22 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (1/8) Epoch 13, batch 2400, train_loss[loss=3.579, NarTop10Accuracy=0.6069, over 5097.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6796, over 5789.69 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (1/8) Epoch 13, batch 2500, train_loss[loss=3.542, NarTop10Accuracy=0.6096, over 5130.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.682, over 5485.03 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,341 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (1/8) Epoch 14, batch 100, train_loss[loss=3.017, NarTop10Accuracy=0.7295, over 7179.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.6821, over 2354.25 frames. ], batch size: 31, lr: 6.24e-03 +2024-08-06 17:22:50,379 INFO [trainer.py:765] (1/8) Epoch 14, batch 200, train_loss[loss=3.16, NarTop10Accuracy=0.7008, over 6843.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.681, over 3859.07 frames. 
], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,880 INFO [trainer.py:765] (1/8) Epoch 14, batch 300, train_loss[loss=3.01, NarTop10Accuracy=0.7253, over 7047.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6857, over 4665.20 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,485 INFO [trainer.py:765] (1/8) Epoch 14, batch 400, train_loss[loss=2.913, NarTop10Accuracy=0.7504, over 5310.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.682, over 5130.13 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,114 INFO [trainer.py:765] (1/8) Epoch 14, batch 500, train_loss[loss=3.221, NarTop10Accuracy=0.6801, over 6138.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6796, over 5412.28 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (1/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,276 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:24:44,822 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,913 INFO [trainer.py:765] (1/8) Epoch 14, batch 600, train_loss[loss=2.903, NarTop10Accuracy=0.7441, over 5820.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6794, over 5659.32 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,547 INFO [trainer.py:765] (1/8) Epoch 14, batch 700, train_loss[loss=3.329, NarTop10Accuracy=0.6639, over 4338.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6817, over 5726.36 frames. ], batch size: 5, lr: 6.19e-03 +2024-08-06 17:26:25,278 INFO [trainer.py:765] (1/8) Epoch 14, batch 800, train_loss[loss=2.804, NarTop10Accuracy=0.7605, over 5025.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6826, over 5787.83 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 17:26:57,658 INFO [trainer.py:765] (1/8) Epoch 14, batch 900, train_loss[loss=3.246, NarTop10Accuracy=0.6775, over 6192.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6843, over 5795.53 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,716 INFO [trainer.py:765] (1/8) Epoch 14, batch 1000, train_loss[loss=3.485, NarTop10Accuracy=0.638, over 6723.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6803, over 5900.30 frames. ], batch size: 14, lr: 6.16e-03 +2024-08-06 17:28:11,596 INFO [trainer.py:765] (1/8) Epoch 14, batch 1100, train_loss[loss=3.006, NarTop10Accuracy=0.7303, over 6960.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.6806, over 5924.91 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,732 INFO [trainer.py:765] (1/8) Epoch 14, batch 1200, train_loss[loss=3.433, NarTop10Accuracy=0.6351, over 7140.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6815, over 5925.45 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,213 INFO [trainer.py:765] (1/8) Epoch 14, batch 1300, train_loss[loss=3.382, NarTop10Accuracy=0.651, over 4278.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6809, over 5989.89 frames. ], batch size: 5, lr: 6.14e-03 +2024-08-06 17:29:54,601 INFO [trainer.py:765] (1/8) Epoch 14, batch 1400, train_loss[loss=3.546, NarTop10Accuracy=0.6247, over 6066.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6795, over 6006.17 frames. 
], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,314 INFO [trainer.py:765] (1/8) Epoch 14, batch 1500, train_loss[loss=3.772, NarTop10Accuracy=0.574, over 6429.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6782, over 5946.50 frames. ], batch size: 50, lr: 6.12e-03 +2024-08-06 17:30:53,042 INFO [trainer.py:765] (1/8) Epoch 14, batch 1600, train_loss[loss=2.901, NarTop10Accuracy=0.7509, over 6876.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6807, over 5945.92 frames. ], batch size: 22, lr: 6.11e-03 +2024-08-06 17:31:19,727 INFO [trainer.py:765] (1/8) Epoch 14, batch 1700, train_loss[loss=3.073, NarTop10Accuracy=0.7175, over 6591.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6851, over 5928.04 frames. ], batch size: 14, lr: 6.10e-03 +2024-08-06 17:31:46,288 INFO [trainer.py:765] (1/8) Epoch 14, batch 1800, train_loss[loss=3.001, NarTop10Accuracy=0.7314, over 7092.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6884, over 5995.16 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (1/8) Epoch 14, batch 1900, train_loss[loss=3.655, NarTop10Accuracy=0.5896, over 6495.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.685, over 6036.35 frames. ], batch size: 51, lr: 6.09e-03 +2024-08-06 17:32:38,282 INFO [trainer.py:765] (1/8) Epoch 14, batch 2000, train_loss[loss=3.163, NarTop10Accuracy=0.6933, over 5760.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6843, over 6006.30 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 17:33:03,645 INFO [trainer.py:765] (1/8) Epoch 14, batch 2100, train_loss[loss=3.124, NarTop10Accuracy=0.7017, over 4839.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6835, over 5993.62 frames. ], batch size: 5, lr: 6.07e-03 +2024-08-06 17:33:28,998 INFO [trainer.py:765] (1/8) Epoch 14, batch 2200, train_loss[loss=3.386, NarTop10Accuracy=0.6595, over 7281.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6836, over 6030.24 frames. ], batch size: 31, lr: 6.06e-03 +2024-08-06 17:33:54,086 INFO [trainer.py:765] (1/8) Epoch 14, batch 2300, train_loss[loss=2.917, NarTop10Accuracy=0.7538, over 5667.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6801, over 6031.36 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (1/8) Epoch 14, batch 2400, train_loss[loss=2.984, NarTop10Accuracy=0.736, over 5193.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6802, over 5788.98 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (1/8) Epoch 14, batch 2500, train_loss[loss=2.971, NarTop10Accuracy=0.7358, over 5046.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6861, over 5482.29 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,394 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (1/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,873 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 17:36:11,738 INFO [trainer.py:765] (1/8) Epoch 15, batch 100, train_loss[loss=3.074, NarTop10Accuracy=0.7111, over 7344.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6821, over 2387.57 frames. 
], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (1/8) Epoch 15, batch 200, train_loss[loss=3.423, NarTop10Accuracy=0.6349, over 6726.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6873, over 3876.23 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (1/8) Epoch 15, batch 300, train_loss[loss=3.325, NarTop10Accuracy=0.6655, over 7131.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6861, over 4678.81 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,903 INFO [trainer.py:765] (1/8) Epoch 15, batch 400, train_loss[loss=2.877, NarTop10Accuracy=0.7488, over 5058.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6877, over 5130.09 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,353 INFO [trainer.py:765] (1/8) Epoch 15, batch 500, train_loss[loss=2.837, NarTop10Accuracy=0.7591, over 6042.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.688, over 5397.69 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,093 INFO [trainer.py:765] (1/8) Epoch 15, batch 600, train_loss[loss=3.025, NarTop10Accuracy=0.736, over 5787.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6854, over 5662.99 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (1/8) Epoch 15, batch 700, train_loss[loss=2.9, NarTop10Accuracy=0.7522, over 5043.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6851, over 5741.94 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,564 INFO [trainer.py:765] (1/8) Epoch 15, batch 800, train_loss[loss=3.265, NarTop10Accuracy=0.6675, over 5124.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6809, over 5805.10 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,790 INFO [trainer.py:765] (1/8) Epoch 15, batch 900, train_loss[loss=3.512, NarTop10Accuracy=0.6159, over 6243.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6847, over 5815.91 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,250 INFO [trainer.py:765] (1/8) Epoch 15, batch 1000, train_loss[loss=3.097, NarTop10Accuracy=0.7097, over 6711.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6877, over 5908.52 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 17:41:46,451 INFO [trainer.py:765] (1/8) Epoch 15, batch 1100, train_loss[loss=3.14, NarTop10Accuracy=0.6997, over 6789.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6873, over 5942.56 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (1/8) Epoch 15, batch 1200, train_loss[loss=3.411, NarTop10Accuracy=0.6456, over 7470.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6809, over 5940.40 frames. ], batch size: 32, lr: 5.73e-03 +2024-08-06 17:42:54,427 INFO [trainer.py:765] (1/8) Epoch 15, batch 1300, train_loss[loss=2.769, NarTop10Accuracy=0.7623, over 5082.00 frames. ], tot_loss[loss=3.203, NarTop10Accuracy=0.6849, over 6004.55 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (1/8) Epoch 15, batch 1400, train_loss[loss=3.445, NarTop10Accuracy=0.6432, over 6246.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6823, over 6052.40 frames. ], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,557 INFO [trainer.py:765] (1/8) Epoch 15, batch 1500, train_loss[loss=3.133, NarTop10Accuracy=0.7028, over 5895.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.681, over 5958.59 frames. 
], batch size: 51, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (1/8) Epoch 15, batch 1600, train_loss[loss=3.493, NarTop10Accuracy=0.6128, over 7035.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6864, over 5916.91 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,855 INFO [trainer.py:765] (1/8) Epoch 15, batch 1700, train_loss[loss=3.107, NarTop10Accuracy=0.7083, over 6240.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6892, over 5918.92 frames. ], batch size: 13, lr: 5.70e-03 +2024-08-06 17:45:17,293 INFO [trainer.py:765] (1/8) Epoch 15, batch 1800, train_loss[loss=3.226, NarTop10Accuracy=0.6794, over 7089.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6885, over 5986.19 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,678 INFO [trainer.py:765] (1/8) Epoch 15, batch 1900, train_loss[loss=3.093, NarTop10Accuracy=0.7051, over 6411.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6836, over 6020.29 frames. ], batch size: 51, lr: 5.68e-03 +2024-08-06 17:45:53,539 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:46:01,742 INFO [trainer.py:811] (1/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:46:02,216 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,371 INFO [trainer.py:765] (1/8) Epoch 15, batch 2000, train_loss[loss=3.229, NarTop10Accuracy=0.6848, over 6093.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.685, over 6008.38 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (1/8) Epoch 15, batch 2100, train_loss[loss=3.102, NarTop10Accuracy=0.6973, over 4032.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6856, over 5990.29 frames. ], batch size: 4, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (1/8) Epoch 15, batch 2200, train_loss[loss=3.023, NarTop10Accuracy=0.716, over 7311.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6844, over 6020.65 frames. ], batch size: 32, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (1/8) Epoch 15, batch 2300, train_loss[loss=3.627, NarTop10Accuracy=0.5949, over 5739.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.683, over 6037.34 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (1/8) Epoch 15, batch 2400, train_loss[loss=3.272, NarTop10Accuracy=0.6752, over 5106.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6875, over 5798.45 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,161 INFO [trainer.py:765] (1/8) Epoch 15, batch 2500, train_loss[loss=2.956, NarTop10Accuracy=0.731, over 5118.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6928, over 5487.58 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:41,073 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 17:49:41,220 INFO [trainer.py:765] (1/8) Epoch 16, batch 100, train_loss[loss=3.328, NarTop10Accuracy=0.6614, over 7203.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6929, over 2363.19 frames. ], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,157 INFO [trainer.py:765] (1/8) Epoch 16, batch 200, train_loss[loss=2.913, NarTop10Accuracy=0.7433, over 6840.00 frames. ], tot_loss[loss=3.203, NarTop10Accuracy=0.6854, over 3868.00 frames. 
], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,158 INFO [trainer.py:765] (1/8) Epoch 16, batch 300, train_loss[loss=3.115, NarTop10Accuracy=0.7049, over 7098.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6883, over 4650.98 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,975 INFO [trainer.py:765] (1/8) Epoch 16, batch 400, train_loss[loss=3.613, NarTop10Accuracy=0.5999, over 5172.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6868, over 5104.33 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,323 INFO [trainer.py:765] (1/8) Epoch 16, batch 500, train_loss[loss=3.036, NarTop10Accuracy=0.7243, over 6042.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6882, over 5391.03 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,251 INFO [trainer.py:765] (1/8) Epoch 16, batch 600, train_loss[loss=2.925, NarTop10Accuracy=0.7424, over 5811.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6867, over 5654.88 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,386 INFO [trainer.py:765] (1/8) Epoch 16, batch 700, train_loss[loss=2.709, NarTop10Accuracy=0.786, over 4953.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6872, over 5743.94 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,815 INFO [trainer.py:765] (1/8) Epoch 16, batch 800, train_loss[loss=3.097, NarTop10Accuracy=0.7054, over 5172.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6889, over 5810.50 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,922 INFO [trainer.py:765] (1/8) Epoch 16, batch 900, train_loss[loss=3.523, NarTop10Accuracy=0.61, over 6309.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6908, over 5829.65 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,607 INFO [trainer.py:765] (1/8) Epoch 16, batch 1000, train_loss[loss=3.011, NarTop10Accuracy=0.7188, over 6717.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6928, over 5927.61 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:55:17,196 INFO [trainer.py:765] (1/8) Epoch 16, batch 1100, train_loss[loss=3.119, NarTop10Accuracy=0.7024, over 6867.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6864, over 5947.83 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,209 INFO [trainer.py:765] (1/8) Epoch 16, batch 1200, train_loss[loss=3.474, NarTop10Accuracy=0.6289, over 7284.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6842, over 5947.59 frames. ], batch size: 32, lr: 5.37e-03 +2024-08-06 17:56:22,774 INFO [trainer.py:765] (1/8) Epoch 16, batch 1300, train_loss[loss=3.653, NarTop10Accuracy=0.6036, over 4296.00 frames. ], tot_loss[loss=3.203, NarTop10Accuracy=0.685, over 5999.23 frames. ], batch size: 5, lr: 5.37e-03 +2024-08-06 17:56:44,647 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (1/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. +2024-08-06 17:56:53,429 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (1/8) Epoch 16, batch 1400, train_loss[loss=3.124, NarTop10Accuracy=0.7051, over 6063.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6867, over 6030.86 frames. 
], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,033 INFO [trainer.py:765] (1/8) Epoch 16, batch 1500, train_loss[loss=3.404, NarTop10Accuracy=0.6554, over 6090.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6868, over 5945.41 frames. ], batch size: 51, lr: 5.35e-03 +2024-08-06 17:58:01,775 INFO [trainer.py:765] (1/8) Epoch 16, batch 1600, train_loss[loss=2.969, NarTop10Accuracy=0.7391, over 7086.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6886, over 5931.44 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (1/8) Epoch 16, batch 1700, train_loss[loss=2.997, NarTop10Accuracy=0.7299, over 6162.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6859, over 5925.87 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 17:58:54,976 INFO [trainer.py:765] (1/8) Epoch 16, batch 1800, train_loss[loss=3.005, NarTop10Accuracy=0.7232, over 7209.00 frames. ], tot_loss[loss=3.183, NarTop10Accuracy=0.6896, over 5980.67 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (1/8) Epoch 16, batch 1900, train_loss[loss=3.542, NarTop10Accuracy=0.6178, over 6054.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6849, over 6027.51 frames. ], batch size: 50, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (1/8) Epoch 16, batch 2000, train_loss[loss=3.138, NarTop10Accuracy=0.6946, over 6213.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5992.06 frames. ], batch size: 51, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (1/8) Epoch 16, batch 2100, train_loss[loss=3.376, NarTop10Accuracy=0.6416, over 3888.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6858, over 5960.54 frames. ], batch size: 4, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (1/8) Epoch 16, batch 2200, train_loss[loss=3.215, NarTop10Accuracy=0.6842, over 7086.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6827, over 6008.73 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (1/8) Epoch 16, batch 2300, train_loss[loss=3.008, NarTop10Accuracy=0.7289, over 6216.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6822, over 6012.61 frames. ], batch size: 10, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (1/8) Epoch 16, batch 2400, train_loss[loss=2.958, NarTop10Accuracy=0.7352, over 5151.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6857, over 5790.77 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,405 INFO [trainer.py:765] (1/8) Epoch 16, batch 2500, train_loss[loss=2.919, NarTop10Accuracy=0.7405, over 5097.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6915, over 5477.19 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,742 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (1/8) Epoch 17, batch 100, train_loss[loss=3.142, NarTop10Accuracy=0.693, over 7143.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7017, over 2370.79 frames. ], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (1/8) Epoch 17, batch 200, train_loss[loss=3.494, NarTop10Accuracy=0.625, over 6792.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6981, over 3864.32 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,591 INFO [trainer.py:765] (1/8) Epoch 17, batch 300, train_loss[loss=3.284, NarTop10Accuracy=0.6668, over 7221.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6943, over 4675.44 frames. 
], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (1/8) Epoch 17, batch 400, train_loss[loss=3.391, NarTop10Accuracy=0.645, over 4995.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6923, over 5113.08 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (1/8) Epoch 17, batch 500, train_loss[loss=2.837, NarTop10Accuracy=0.7583, over 6051.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6956, over 5370.10 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (1/8) Epoch 17, batch 600, train_loss[loss=3.163, NarTop10Accuracy=0.6813, over 5718.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6905, over 5650.56 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (1/8) Epoch 17, batch 700, train_loss[loss=3.025, NarTop10Accuracy=0.7206, over 5097.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6921, over 5714.93 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,725 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (1/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,764 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,354 INFO [trainer.py:765] (1/8) Epoch 17, batch 800, train_loss[loss=3.006, NarTop10Accuracy=0.7224, over 4977.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6898, over 5785.16 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (1/8) Epoch 17, batch 900, train_loss[loss=3.493, NarTop10Accuracy=0.6133, over 6345.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6944, over 5800.61 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (1/8) Epoch 17, batch 1000, train_loss[loss=3.129, NarTop10Accuracy=0.6971, over 6396.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6925, over 5893.99 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (1/8) Epoch 17, batch 1100, train_loss[loss=3.057, NarTop10Accuracy=0.7198, over 6591.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6914, over 5928.84 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (1/8) Epoch 17, batch 1200, train_loss[loss=3.185, NarTop10Accuracy=0.692, over 7221.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6925, over 5944.57 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (1/8) Epoch 17, batch 1300, train_loss[loss=3.337, NarTop10Accuracy=0.6574, over 5145.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6913, over 6010.15 frames. ], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,027 INFO [trainer.py:765] (1/8) Epoch 17, batch 1400, train_loss[loss=3.219, NarTop10Accuracy=0.686, over 6099.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6899, over 6026.07 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,106 INFO [trainer.py:765] (1/8) Epoch 17, batch 1500, train_loss[loss=3.473, NarTop10Accuracy=0.636, over 6171.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6914, over 5953.50 frames. 
], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (1/8) Epoch 17, batch 1600, train_loss[loss=3.097, NarTop10Accuracy=0.7089, over 6921.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6947, over 5933.51 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,509 INFO [trainer.py:765] (1/8) Epoch 17, batch 1700, train_loss[loss=3.54, NarTop10Accuracy=0.6214, over 6783.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6912, over 5905.87 frames. ], batch size: 14, lr: 5.03e-03 +2024-08-06 18:12:40,002 INFO [trainer.py:765] (1/8) Epoch 17, batch 1800, train_loss[loss=2.981, NarTop10Accuracy=0.7315, over 7032.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6894, over 5977.94 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (1/8) Epoch 17, batch 1900, train_loss[loss=3.166, NarTop10Accuracy=0.6961, over 6189.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6868, over 6024.65 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (1/8) Epoch 17, batch 2000, train_loss[loss=3.577, NarTop10Accuracy=0.6093, over 6270.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6921, over 6016.57 frames. ], batch size: 53, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (1/8) Epoch 17, batch 2100, train_loss[loss=3.017, NarTop10Accuracy=0.7215, over 3930.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.69, over 5983.52 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,435 INFO [trainer.py:765] (1/8) Epoch 17, batch 2200, train_loss[loss=2.997, NarTop10Accuracy=0.7295, over 7338.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6876, over 6024.59 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (1/8) Epoch 17, batch 2300, train_loss[loss=2.907, NarTop10Accuracy=0.7476, over 5700.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6869, over 6032.69 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (1/8) Epoch 17, batch 2400, train_loss[loss=2.989, NarTop10Accuracy=0.7223, over 5859.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6889, over 5788.41 frames. ], batch size: 8, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (1/8) Epoch 17, batch 2500, train_loss[loss=2.691, NarTop10Accuracy=0.7814, over 5211.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6911, over 5484.75 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:55,778 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 18:16:49,908 INFO [trainer.py:765] (1/8) Epoch 18, batch 100, train_loss[loss=3.094, NarTop10Accuracy=0.7083, over 7335.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6905, over 2365.54 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (1/8) Epoch 18, batch 200, train_loss[loss=3.03, NarTop10Accuracy=0.7206, over 6744.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6946, over 3853.62 frames. ], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,716 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 18:17:35,926 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. 
+2024-08-06 18:17:35,927 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 18:17:36,528 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (1/8) Epoch 18, batch 300, train_loss[loss=3.362, NarTop10Accuracy=0.6515, over 7179.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6929, over 4654.49 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (1/8) Epoch 18, batch 400, train_loss[loss=3.35, NarTop10Accuracy=0.6528, over 5106.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6938, over 5089.18 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (1/8) Epoch 18, batch 500, train_loss[loss=3.058, NarTop10Accuracy=0.7189, over 6126.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6938, over 5364.59 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (1/8) Epoch 18, batch 600, train_loss[loss=3.305, NarTop10Accuracy=0.6662, over 5721.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6944, over 5638.63 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,869 INFO [trainer.py:765] (1/8) Epoch 18, batch 700, train_loss[loss=3.485, NarTop10Accuracy=0.6303, over 5118.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6933, over 5695.99 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (1/8) Epoch 18, batch 800, train_loss[loss=2.712, NarTop10Accuracy=0.7956, over 5043.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6916, over 5759.12 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,408 INFO [trainer.py:765] (1/8) Epoch 18, batch 900, train_loss[loss=2.949, NarTop10Accuracy=0.7402, over 6780.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6965, over 5788.79 frames. ], batch size: 14, lr: 4.79e-03 +2024-08-06 18:22:11,191 INFO [trainer.py:765] (1/8) Epoch 18, batch 1000, train_loss[loss=2.951, NarTop10Accuracy=0.7363, over 6219.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6936, over 5894.05 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (1/8) Epoch 18, batch 1100, train_loss[loss=3.28, NarTop10Accuracy=0.6623, over 6825.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6935, over 5939.43 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (1/8) Epoch 18, batch 1200, train_loss[loss=3.734, NarTop10Accuracy=0.5641, over 7515.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6901, over 5935.71 frames. ], batch size: 32, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (1/8) Epoch 18, batch 1300, train_loss[loss=2.894, NarTop10Accuracy=0.7434, over 5049.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6952, over 5984.17 frames. ], batch size: 6, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (1/8) Epoch 18, batch 1400, train_loss[loss=2.959, NarTop10Accuracy=0.7383, over 6162.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6945, over 6002.66 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (1/8) Epoch 18, batch 1500, train_loss[loss=3.1, NarTop10Accuracy=0.708, over 6138.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6955, over 5935.01 frames. 
], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (1/8) Epoch 18, batch 1600, train_loss[loss=3.131, NarTop10Accuracy=0.7006, over 7089.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6933, over 5917.57 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,688 INFO [trainer.py:765] (1/8) Epoch 18, batch 1700, train_loss[loss=3.071, NarTop10Accuracy=0.7189, over 6588.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6925, over 5891.77 frames. ], batch size: 14, lr: 4.75e-03 +2024-08-06 18:26:21,196 INFO [trainer.py:765] (1/8) Epoch 18, batch 1800, train_loss[loss=3.522, NarTop10Accuracy=0.6257, over 7014.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6933, over 5951.22 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (1/8) Epoch 18, batch 1900, train_loss[loss=3.191, NarTop10Accuracy=0.6869, over 6066.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6905, over 6007.98 frames. ], batch size: 51, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (1/8) Epoch 18, batch 2000, train_loss[loss=3.116, NarTop10Accuracy=0.7008, over 6195.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6935, over 5991.07 frames. ], batch size: 50, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (1/8) Epoch 18, batch 2100, train_loss[loss=3.239, NarTop10Accuracy=0.6781, over 3864.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.694, over 5973.91 frames. ], batch size: 4, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (1/8) Epoch 18, batch 2200, train_loss[loss=2.979, NarTop10Accuracy=0.7309, over 7236.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6932, over 5999.28 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 18:28:06,571 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,096 INFO [trainer.py:765] (1/8) Epoch 18, batch 2300, train_loss[loss=2.895, NarTop10Accuracy=0.7373, over 5616.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6905, over 6026.83 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (1/8) Epoch 18, batch 2400, train_loss[loss=2.93, NarTop10Accuracy=0.7374, over 5166.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.695, over 5788.00 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (1/8) Epoch 18, batch 2500, train_loss[loss=2.908, NarTop10Accuracy=0.7391, over 5109.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6993, over 5490.18 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,357 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 18:30:41,232 INFO [trainer.py:765] (1/8) Epoch 19, batch 100, train_loss[loss=2.978, NarTop10Accuracy=0.7321, over 7623.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6945, over 2361.05 frames. ], batch size: 32, lr: 4.57e-03 +2024-08-06 18:31:15,603 INFO [trainer.py:765] (1/8) Epoch 19, batch 200, train_loss[loss=2.906, NarTop10Accuracy=0.7475, over 6864.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6966, over 3836.37 frames. 
], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (1/8) Epoch 19, batch 300, train_loss[loss=3.387, NarTop10Accuracy=0.6395, over 7059.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6993, over 4649.19 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (1/8) Epoch 19, batch 400, train_loss[loss=3.125, NarTop10Accuracy=0.7073, over 5091.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6997, over 5113.86 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (1/8) Epoch 19, batch 500, train_loss[loss=3.048, NarTop10Accuracy=0.7215, over 6081.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6991, over 5405.19 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (1/8) Epoch 19, batch 600, train_loss[loss=3.076, NarTop10Accuracy=0.7108, over 5748.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 5659.12 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (1/8) Epoch 19, batch 700, train_loss[loss=2.974, NarTop10Accuracy=0.7304, over 5052.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6969, over 5721.90 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (1/8) Epoch 19, batch 800, train_loss[loss=3.24, NarTop10Accuracy=0.6688, over 4359.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6957, over 5804.02 frames. ], batch size: 5, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (1/8) Epoch 19, batch 900, train_loss[loss=2.889, NarTop10Accuracy=0.7559, over 6174.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6976, over 5822.47 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (1/8) Epoch 19, batch 1000, train_loss[loss=3.396, NarTop10Accuracy=0.6538, over 6288.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6967, over 5929.18 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,939 INFO [trainer.py:765] (1/8) Epoch 19, batch 1100, train_loss[loss=2.997, NarTop10Accuracy=0.7282, over 6675.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6942, over 5927.63 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (1/8) Epoch 19, batch 1200, train_loss[loss=3.034, NarTop10Accuracy=0.7212, over 7215.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6912, over 5936.76 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (1/8) Epoch 19, batch 1300, train_loss[loss=2.953, NarTop10Accuracy=0.7441, over 5133.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6915, over 5997.63 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,680 INFO [trainer.py:765] (1/8) Epoch 19, batch 1400, train_loss[loss=2.794, NarTop10Accuracy=0.7588, over 6003.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6917, over 6020.70 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (1/8) Epoch 19, batch 1500, train_loss[loss=3.394, NarTop10Accuracy=0.6409, over 5859.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6955, over 5958.68 frames. ], batch size: 50, lr: 4.50e-03 +2024-08-06 18:39:02,312 INFO [trainer.py:765] (1/8) Epoch 19, batch 1600, train_loss[loss=3.423, NarTop10Accuracy=0.6444, over 7047.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6963, over 5942.06 frames. 
], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,591 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (1/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,192 INFO [trainer.py:765] (1/8) Epoch 19, batch 1700, train_loss[loss=3.467, NarTop10Accuracy=0.6241, over 6663.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5933.68 frames. ], batch size: 14, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (1/8) Epoch 19, batch 1800, train_loss[loss=3.477, NarTop10Accuracy=0.6231, over 7392.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.696, over 5982.11 frames. ], batch size: 23, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (1/8) Epoch 19, batch 1900, train_loss[loss=3.149, NarTop10Accuracy=0.7025, over 5715.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6963, over 6014.38 frames. ], batch size: 51, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (1/8) Epoch 19, batch 2000, train_loss[loss=3.26, NarTop10Accuracy=0.6766, over 6228.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6973, over 5987.62 frames. ], batch size: 51, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (1/8) Epoch 19, batch 2100, train_loss[loss=3.086, NarTop10Accuracy=0.7126, over 4869.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6982, over 5977.69 frames. ], batch size: 5, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (1/8) Epoch 19, batch 2200, train_loss[loss=3.232, NarTop10Accuracy=0.6791, over 6987.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6955, over 6000.34 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (1/8) Epoch 19, batch 2300, train_loss[loss=3.24, NarTop10Accuracy=0.6706, over 5748.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6921, over 6028.71 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,987 INFO [trainer.py:765] (1/8) Epoch 19, batch 2400, train_loss[loss=3.035, NarTop10Accuracy=0.7166, over 5703.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6961, over 5788.06 frames. ], batch size: 8, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (1/8) Epoch 19, batch 2500, train_loss[loss=2.921, NarTop10Accuracy=0.7419, over 5166.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6994, over 5496.78 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,586 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 18:44:22,974 INFO [trainer.py:765] (1/8) Epoch 20, batch 100, train_loss[loss=3.279, NarTop10Accuracy=0.6712, over 7320.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6935, over 2364.09 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (1/8) Epoch 20, batch 200, train_loss[loss=3.474, NarTop10Accuracy=0.629, over 6756.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6999, over 3842.50 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,279 INFO [trainer.py:765] (1/8) Epoch 20, batch 300, train_loss[loss=3.381, NarTop10Accuracy=0.655, over 7362.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7012, over 4651.97 frames. 
], batch size: 23, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (1/8) Epoch 20, batch 400, train_loss[loss=2.816, NarTop10Accuracy=0.7688, over 5247.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7005, over 5117.92 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,770 INFO [trainer.py:765] (1/8) Epoch 20, batch 500, train_loss[loss=2.914, NarTop10Accuracy=0.7416, over 6027.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6996, over 5391.44 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (1/8) Epoch 20, batch 600, train_loss[loss=2.953, NarTop10Accuracy=0.7363, over 5901.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7005, over 5642.81 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (1/8) Epoch 20, batch 700, train_loss[loss=2.669, NarTop10Accuracy=0.7839, over 5145.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7025, over 5703.63 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (1/8) Epoch 20, batch 800, train_loss[loss=2.821, NarTop10Accuracy=0.7697, over 4203.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.699, over 5789.05 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:56,535 INFO [trainer.py:765] (1/8) Epoch 20, batch 900, train_loss[loss=2.894, NarTop10Accuracy=0.7528, over 6561.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7016, over 5813.04 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (1/8) Epoch 20, batch 1000, train_loss[loss=3.372, NarTop10Accuracy=0.6527, over 6597.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6953, over 5908.46 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,237 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (1/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,428 INFO [trainer.py:765] (1/8) Epoch 20, batch 1100, train_loss[loss=3.296, NarTop10Accuracy=0.6683, over 6981.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6952, over 5944.50 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (1/8) Epoch 20, batch 1200, train_loss[loss=2.981, NarTop10Accuracy=0.7278, over 7227.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6957, over 5928.93 frames. ], batch size: 31, lr: 4.29e-03 +2024-08-06 18:51:25,130 INFO [trainer.py:765] (1/8) Epoch 20, batch 1300, train_loss[loss=3.195, NarTop10Accuracy=0.6823, over 4380.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6972, over 5983.71 frames. ], batch size: 5, lr: 4.29e-03 +2024-08-06 18:51:59,314 INFO [trainer.py:765] (1/8) Epoch 20, batch 1400, train_loss[loss=2.985, NarTop10Accuracy=0.7357, over 6141.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6985, over 6000.85 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,806 INFO [trainer.py:765] (1/8) Epoch 20, batch 1500, train_loss[loss=3.193, NarTop10Accuracy=0.6826, over 5952.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6974, over 5953.68 frames. 
], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (1/8) Epoch 20, batch 1600, train_loss[loss=2.989, NarTop10Accuracy=0.74, over 7146.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6966, over 5933.47 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (1/8) Epoch 20, batch 1700, train_loss[loss=3.605, NarTop10Accuracy=0.602, over 6696.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 5924.38 frames. ], batch size: 14, lr: 4.27e-03 +2024-08-06 18:53:53,851 INFO [trainer.py:765] (1/8) Epoch 20, batch 1800, train_loss[loss=3.037, NarTop10Accuracy=0.7239, over 7203.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6985, over 5995.74 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (1/8) Epoch 20, batch 1900, train_loss[loss=3.162, NarTop10Accuracy=0.6976, over 5580.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6926, over 6016.18 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:54:45,891 INFO [trainer.py:765] (1/8) Epoch 20, batch 2000, train_loss[loss=3.541, NarTop10Accuracy=0.6184, over 5985.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.693, over 5997.72 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (1/8) Epoch 20, batch 2100, train_loss[loss=3.21, NarTop10Accuracy=0.6642, over 4791.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6947, over 5965.04 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (1/8) Epoch 20, batch 2200, train_loss[loss=2.878, NarTop10Accuracy=0.7514, over 7329.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6953, over 6014.34 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,635 INFO [trainer.py:765] (1/8) Epoch 20, batch 2300, train_loss[loss=3.135, NarTop10Accuracy=0.6986, over 5622.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6919, over 6016.30 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,050 INFO [trainer.py:765] (1/8) Epoch 20, batch 2400, train_loss[loss=2.999, NarTop10Accuracy=0.7293, over 5217.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.694, over 5779.05 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (1/8) Epoch 20, batch 2500, train_loss[loss=2.938, NarTop10Accuracy=0.7477, over 5118.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7018, over 5481.39 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,563 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (1/8) Epoch 21, batch 100, train_loss[loss=3.356, NarTop10Accuracy=0.6517, over 6987.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7057, over 2350.65 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,417 INFO [trainer.py:765] (1/8) Epoch 21, batch 200, train_loss[loss=2.869, NarTop10Accuracy=0.7556, over 6837.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.701, over 3841.29 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (1/8) Epoch 21, batch 300, train_loss[loss=2.82, NarTop10Accuracy=0.7671, over 7014.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7004, over 4651.97 frames. ], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,150 INFO [trainer.py:765] (1/8) Epoch 21, batch 400, train_loss[loss=2.907, NarTop10Accuracy=0.7491, over 5043.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7026, over 5094.16 frames. 
], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,839 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (1/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,890 INFO [trainer.py:765] (1/8) Epoch 21, batch 500, train_loss[loss=2.816, NarTop10Accuracy=0.7608, over 6234.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7012, over 5371.94 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,328 INFO [trainer.py:765] (1/8) Epoch 21, batch 600, train_loss[loss=3.4, NarTop10Accuracy=0.648, over 5718.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7041, over 5655.48 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (1/8) Epoch 21, batch 700, train_loss[loss=2.86, NarTop10Accuracy=0.7529, over 5037.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7034, over 5733.66 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (1/8) Epoch 21, batch 800, train_loss[loss=3.031, NarTop10Accuracy=0.7285, over 5088.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7002, over 5792.60 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (1/8) Epoch 21, batch 900, train_loss[loss=2.937, NarTop10Accuracy=0.7349, over 6174.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7002, over 5824.58 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:03:25,800 INFO [trainer.py:765] (1/8) Epoch 21, batch 1000, train_loss[loss=3.063, NarTop10Accuracy=0.7184, over 6306.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6993, over 5925.82 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:04:07,206 INFO [trainer.py:765] (1/8) Epoch 21, batch 1100, train_loss[loss=3.482, NarTop10Accuracy=0.635, over 6765.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6951, over 5956.81 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,462 INFO [trainer.py:765] (1/8) Epoch 21, batch 1200, train_loss[loss=3.34, NarTop10Accuracy=0.6581, over 7443.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6991, over 5937.19 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,315 INFO [trainer.py:765] (1/8) Epoch 21, batch 1300, train_loss[loss=2.89, NarTop10Accuracy=0.7419, over 4224.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7039, over 5996.59 frames. ], batch size: 5, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (1/8) Epoch 21, batch 1400, train_loss[loss=3.428, NarTop10Accuracy=0.6336, over 6039.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7032, over 6001.79 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,599 INFO [trainer.py:765] (1/8) Epoch 21, batch 1500, train_loss[loss=3.223, NarTop10Accuracy=0.6841, over 6408.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6999, over 5938.86 frames. ], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,461 INFO [trainer.py:765] (1/8) Epoch 21, batch 1600, train_loss[loss=2.945, NarTop10Accuracy=0.7447, over 6960.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6991, over 5924.21 frames. 
], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,211 INFO [trainer.py:765] (1/8) Epoch 21, batch 1700, train_loss[loss=3.201, NarTop10Accuracy=0.6919, over 6585.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6981, over 5920.20 frames. ], batch size: 14, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (1/8) Epoch 21, batch 1800, train_loss[loss=2.836, NarTop10Accuracy=0.7518, over 7119.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6992, over 5972.93 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (1/8) Epoch 21, batch 1900, train_loss[loss=3.657, NarTop10Accuracy=0.595, over 5958.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6968, over 6019.77 frames. ], batch size: 50, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (1/8) Epoch 21, batch 2000, train_loss[loss=3.498, NarTop10Accuracy=0.6294, over 6138.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6962, over 5997.44 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (1/8) Epoch 21, batch 2100, train_loss[loss=2.758, NarTop10Accuracy=0.7782, over 4827.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6959, over 5970.84 frames. ], batch size: 5, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (1/8) Epoch 21, batch 2200, train_loss[loss=2.907, NarTop10Accuracy=0.742, over 7119.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6948, over 6018.16 frames. ], batch size: 31, lr: 4.04e-03 +2024-08-06 19:09:53,222 INFO [trainer.py:765] (1/8) Epoch 21, batch 2300, train_loss[loss=3.111, NarTop10Accuracy=0.6958, over 5769.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.692, over 6030.19 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,596 INFO [trainer.py:765] (1/8) Epoch 21, batch 2400, train_loss[loss=3.34, NarTop10Accuracy=0.6507, over 5205.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6962, over 5771.95 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,228 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (1/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,276 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (1/8) Epoch 21, batch 2500, train_loss[loss=3.387, NarTop10Accuracy=0.6533, over 5091.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.704, over 5460.11 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:08,908 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 19:12:09,053 INFO [trainer.py:765] (1/8) Epoch 22, batch 100, train_loss[loss=3.055, NarTop10Accuracy=0.7234, over 7206.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7045, over 2370.31 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (1/8) Epoch 22, batch 200, train_loss[loss=3.176, NarTop10Accuracy=0.6876, over 6807.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7037, over 3856.56 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (1/8) Epoch 22, batch 300, train_loss[loss=2.954, NarTop10Accuracy=0.7362, over 6885.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.706, over 4651.70 frames. 
], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,228 INFO [trainer.py:765] (1/8) Epoch 22, batch 400, train_loss[loss=2.989, NarTop10Accuracy=0.7363, over 5109.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.707, over 5103.93 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (1/8) Epoch 22, batch 500, train_loss[loss=3.132, NarTop10Accuracy=0.6949, over 6126.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.706, over 5388.78 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (1/8) Epoch 22, batch 600, train_loss[loss=3.114, NarTop10Accuracy=0.7082, over 5706.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6994, over 5650.35 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (1/8) Epoch 22, batch 700, train_loss[loss=3.467, NarTop10Accuracy=0.6349, over 4377.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.7, over 5708.94 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:10,664 INFO [trainer.py:765] (1/8) Epoch 22, batch 800, train_loss[loss=2.882, NarTop10Accuracy=0.7503, over 5046.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7025, over 5791.66 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (1/8) Epoch 22, batch 900, train_loss[loss=3.061, NarTop10Accuracy=0.7137, over 6177.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7022, over 5818.86 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (1/8) Epoch 22, batch 1000, train_loss[loss=3.161, NarTop10Accuracy=0.6928, over 6546.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7035, over 5913.57 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (1/8) Epoch 22, batch 1100, train_loss[loss=2.969, NarTop10Accuracy=0.7367, over 6831.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7024, over 5950.03 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,926 INFO [trainer.py:765] (1/8) Epoch 22, batch 1200, train_loss[loss=2.927, NarTop10Accuracy=0.745, over 7056.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 5946.95 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,252 INFO [trainer.py:765] (1/8) Epoch 22, batch 1300, train_loss[loss=2.944, NarTop10Accuracy=0.7418, over 5172.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7073, over 6005.49 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,316 INFO [trainer.py:765] (1/8) Epoch 22, batch 1400, train_loss[loss=2.845, NarTop10Accuracy=0.7641, over 6036.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.7047, over 6026.15 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (1/8) Epoch 22, batch 1500, train_loss[loss=3.512, NarTop10Accuracy=0.6193, over 5880.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7043, over 5960.43 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,646 INFO [trainer.py:765] (1/8) Epoch 22, batch 1600, train_loss[loss=3.14, NarTop10Accuracy=0.7018, over 7041.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7008, over 5935.41 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,417 INFO [trainer.py:765] (1/8) Epoch 22, batch 1700, train_loss[loss=3.299, NarTop10Accuracy=0.6676, over 6633.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7006, over 5926.97 frames. 
], batch size: 14, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (1/8) Epoch 22, batch 1800, train_loss[loss=2.885, NarTop10Accuracy=0.7433, over 7203.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7016, over 5991.64 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (1/8) Epoch 22, batch 1900, train_loss[loss=3.092, NarTop10Accuracy=0.7117, over 6291.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6972, over 6045.79 frames. ], batch size: 52, lr: 3.87e-03 +2024-08-06 19:21:53,109 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (1/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,819 INFO [trainer.py:765] (1/8) Epoch 22, batch 2000, train_loss[loss=3.576, NarTop10Accuracy=0.6102, over 6270.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7019, over 6016.48 frames. ], batch size: 51, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (1/8) Epoch 22, batch 2100, train_loss[loss=3.399, NarTop10Accuracy=0.6428, over 4767.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7032, over 5990.87 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 19:23:15,230 INFO [trainer.py:765] (1/8) Epoch 22, batch 2200, train_loss[loss=3.043, NarTop10Accuracy=0.7208, over 7224.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7034, over 6016.39 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,315 INFO [trainer.py:765] (1/8) Epoch 22, batch 2300, train_loss[loss=3.022, NarTop10Accuracy=0.7237, over 5655.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7001, over 6039.75 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (1/8) Epoch 22, batch 2400, train_loss[loss=3.122, NarTop10Accuracy=0.7047, over 5130.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7026, over 5805.11 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (1/8) Epoch 22, batch 2500, train_loss[loss=3.125, NarTop10Accuracy=0.6888, over 5145.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7055, over 5512.87 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,564 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (1/8) Epoch 23, batch 100, train_loss[loss=3.058, NarTop10Accuracy=0.7229, over 7374.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7013, over 2372.04 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (1/8) Epoch 23, batch 200, train_loss[loss=3.468, NarTop10Accuracy=0.6343, over 6771.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 3868.22 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (1/8) Epoch 23, batch 300, train_loss[loss=3.019, NarTop10Accuracy=0.7282, over 7212.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7053, over 4667.14 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,540 INFO [trainer.py:765] (1/8) Epoch 23, batch 400, train_loss[loss=3.385, NarTop10Accuracy=0.6527, over 5214.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7038, over 5126.27 frames. 
], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,712 INFO [trainer.py:765] (1/8) Epoch 23, batch 500, train_loss[loss=3.421, NarTop10Accuracy=0.6277, over 6048.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7033, over 5403.53 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (1/8) Epoch 23, batch 600, train_loss[loss=3.399, NarTop10Accuracy=0.6422, over 5739.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7047, over 5651.03 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,366 INFO [trainer.py:765] (1/8) Epoch 23, batch 700, train_loss[loss=3.398, NarTop10Accuracy=0.6473, over 4230.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7067, over 5711.68 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (1/8) Epoch 23, batch 800, train_loss[loss=2.653, NarTop10Accuracy=0.7858, over 5145.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.7045, over 5768.72 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (1/8) Epoch 23, batch 900, train_loss[loss=3.332, NarTop10Accuracy=0.653, over 6147.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7067, over 5791.29 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (1/8) Epoch 23, batch 1000, train_loss[loss=3.007, NarTop10Accuracy=0.7231, over 6204.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 5898.84 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,520 INFO [trainer.py:765] (1/8) Epoch 23, batch 1100, train_loss[loss=3.074, NarTop10Accuracy=0.7172, over 6867.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.708, over 5940.40 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (1/8) Epoch 23, batch 1200, train_loss[loss=3.009, NarTop10Accuracy=0.726, over 7377.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7042, over 5941.10 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (1/8) Epoch 23, batch 1300, train_loss[loss=3.185, NarTop10Accuracy=0.6892, over 4983.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7039, over 6004.13 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (1/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,409 INFO [trainer.py:765] (1/8) Epoch 23, batch 1400, train_loss[loss=2.746, NarTop10Accuracy=0.7811, over 5997.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7033, over 6024.74 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,215 INFO [trainer.py:765] (1/8) Epoch 23, batch 1500, train_loss[loss=3.28, NarTop10Accuracy=0.6757, over 5889.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7062, over 5970.50 frames. ], batch size: 50, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (1/8) Epoch 23, batch 1600, train_loss[loss=2.895, NarTop10Accuracy=0.7442, over 7212.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7046, over 5968.89 frames. 
], batch size: 23, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (1/8) Epoch 23, batch 1700, train_loss[loss=3.234, NarTop10Accuracy=0.6801, over 6606.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7005, over 5935.44 frames. ], batch size: 14, lr: 3.71e-03 +2024-08-06 19:35:19,262 INFO [trainer.py:765] (1/8) Epoch 23, batch 1800, train_loss[loss=2.949, NarTop10Accuracy=0.7328, over 7092.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7011, over 5977.04 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,596 INFO [trainer.py:765] (1/8) Epoch 23, batch 1900, train_loss[loss=3.342, NarTop10Accuracy=0.6545, over 5679.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7001, over 6018.39 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,170 INFO [trainer.py:765] (1/8) Epoch 23, batch 2000, train_loss[loss=3.567, NarTop10Accuracy=0.6056, over 6429.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7041, over 5984.40 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,518 INFO [trainer.py:765] (1/8) Epoch 23, batch 2100, train_loss[loss=3.205, NarTop10Accuracy=0.6827, over 3954.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.703, over 5987.74 frames. ], batch size: 4, lr: 3.69e-03 +2024-08-06 19:37:01,908 INFO [trainer.py:765] (1/8) Epoch 23, batch 2200, train_loss[loss=3.089, NarTop10Accuracy=0.7006, over 7269.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.6998, over 6033.60 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,060 INFO [trainer.py:765] (1/8) Epoch 23, batch 2300, train_loss[loss=3.015, NarTop10Accuracy=0.7236, over 5622.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.702, over 6047.55 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,425 INFO [trainer.py:765] (1/8) Epoch 23, batch 2400, train_loss[loss=3.007, NarTop10Accuracy=0.7298, over 5106.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.701, over 5795.55 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,053 INFO [trainer.py:765] (1/8) Epoch 23, batch 2500, train_loss[loss=3.442, NarTop10Accuracy=0.6356, over 5175.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7057, over 5484.39 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,059 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 19:39:37,631 INFO [trainer.py:765] (1/8) Epoch 24, batch 100, train_loss[loss=3.417, NarTop10Accuracy=0.6408, over 7416.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7029, over 2363.83 frames. ], batch size: 32, lr: 3.60e-03 +2024-08-06 19:40:10,189 INFO [trainer.py:765] (1/8) Epoch 24, batch 200, train_loss[loss=2.839, NarTop10Accuracy=0.7602, over 6840.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7046, over 3868.16 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,554 INFO [trainer.py:765] (1/8) Epoch 24, batch 300, train_loss[loss=2.846, NarTop10Accuracy=0.7675, over 7092.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7059, over 4667.96 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,233 INFO [trainer.py:765] (1/8) Epoch 24, batch 400, train_loss[loss=2.834, NarTop10Accuracy=0.7418, over 5220.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7067, over 5114.44 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,321 INFO [trainer.py:765] (1/8) Epoch 24, batch 500, train_loss[loss=2.966, NarTop10Accuracy=0.7388, over 6150.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7097, over 5379.61 frames. 
], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,451 INFO [trainer.py:765] (1/8) Epoch 24, batch 600, train_loss[loss=2.852, NarTop10Accuracy=0.7545, over 5742.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7082, over 5646.78 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,842 INFO [trainer.py:765] (1/8) Epoch 24, batch 700, train_loss[loss=2.904, NarTop10Accuracy=0.7497, over 5130.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7069, over 5724.88 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,380 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (1/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29066MB +2024-08-06 19:43:28,562 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,814 INFO [trainer.py:765] (1/8) Epoch 24, batch 800, train_loss[loss=2.874, NarTop10Accuracy=0.7516, over 5025.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7077, over 5785.12 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,409 INFO [trainer.py:765] (1/8) Epoch 24, batch 900, train_loss[loss=2.878, NarTop10Accuracy=0.7496, over 6723.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5801.37 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:44:47,489 INFO [trainer.py:765] (1/8) Epoch 24, batch 1000, train_loss[loss=3.253, NarTop10Accuracy=0.6691, over 6399.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.706, over 5905.91 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:45:27,107 INFO [trainer.py:765] (1/8) Epoch 24, batch 1100, train_loss[loss=3.452, NarTop10Accuracy=0.6292, over 6828.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7035, over 5940.87 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,437 INFO [trainer.py:765] (1/8) Epoch 24, batch 1200, train_loss[loss=2.948, NarTop10Accuracy=0.7416, over 7578.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7046, over 5957.79 frames. ], batch size: 32, lr: 3.57e-03 +2024-08-06 19:46:30,293 INFO [trainer.py:765] (1/8) Epoch 24, batch 1300, train_loss[loss=3.217, NarTop10Accuracy=0.6698, over 4293.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7056, over 6007.72 frames. ], batch size: 5, lr: 3.56e-03 +2024-08-06 19:47:07,859 INFO [trainer.py:765] (1/8) Epoch 24, batch 1400, train_loss[loss=3.314, NarTop10Accuracy=0.6644, over 6150.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7033, over 6017.20 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,956 INFO [trainer.py:765] (1/8) Epoch 24, batch 1500, train_loss[loss=3.402, NarTop10Accuracy=0.6479, over 5919.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7016, over 5965.35 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,675 INFO [trainer.py:765] (1/8) Epoch 24, batch 1600, train_loss[loss=3.447, NarTop10Accuracy=0.6357, over 7071.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7003, over 5935.28 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,266 INFO [trainer.py:765] (1/8) Epoch 24, batch 1700, train_loss[loss=2.858, NarTop10Accuracy=0.7612, over 6531.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7012, over 5911.94 frames. 
], batch size: 14, lr: 3.55e-03 +2024-08-06 19:49:01,637 INFO [trainer.py:765] (1/8) Epoch 24, batch 1800, train_loss[loss=2.983, NarTop10Accuracy=0.7313, over 7191.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.6999, over 5963.22 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,041 INFO [trainer.py:765] (1/8) Epoch 24, batch 1900, train_loss[loss=3.613, NarTop10Accuracy=0.6023, over 6222.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.6998, over 6027.63 frames. ], batch size: 50, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (1/8) Epoch 24, batch 2000, train_loss[loss=3.506, NarTop10Accuracy=0.6242, over 6528.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.705, over 6007.38 frames. ], batch size: 51, lr: 3.54e-03 +2024-08-06 19:50:18,819 INFO [trainer.py:765] (1/8) Epoch 24, batch 2100, train_loss[loss=3.024, NarTop10Accuracy=0.7188, over 3912.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7049, over 5991.68 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (1/8) Epoch 24, batch 2200, train_loss[loss=3.458, NarTop10Accuracy=0.6324, over 6984.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7045, over 6027.63 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (1/8) Epoch 24, batch 2300, train_loss[loss=2.743, NarTop10Accuracy=0.7729, over 5751.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 6042.53 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,348 INFO [trainer.py:765] (1/8) Epoch 24, batch 2400, train_loss[loss=3.02, NarTop10Accuracy=0.717, over 5229.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7052, over 5789.93 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,782 INFO [trainer.py:765] (1/8) Epoch 24, batch 2500, train_loss[loss=2.914, NarTop10Accuracy=0.7413, over 5724.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7087, over 5501.58 frames. ], batch size: 8, lr: 3.53e-03 +2024-08-06 19:52:17,153 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 19:53:22,196 INFO [trainer.py:765] (1/8) Epoch 25, batch 100, train_loss[loss=3.403, NarTop10Accuracy=0.6454, over 7251.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 2357.72 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,261 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (1/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,176 INFO [trainer.py:765] (1/8) Epoch 25, batch 200, train_loss[loss=2.845, NarTop10Accuracy=0.7592, over 6822.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.708, over 3831.60 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,647 INFO [trainer.py:765] (1/8) Epoch 25, batch 300, train_loss[loss=3.238, NarTop10Accuracy=0.6804, over 6972.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7092, over 4644.72 frames. ], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (1/8) Epoch 25, batch 400, train_loss[loss=2.96, NarTop10Accuracy=0.7419, over 4977.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.709, over 5086.95 frames. 
], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,737 INFO [trainer.py:765] (1/8) Epoch 25, batch 500, train_loss[loss=2.744, NarTop10Accuracy=0.7707, over 6129.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5374.66 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,814 INFO [trainer.py:765] (1/8) Epoch 25, batch 600, train_loss[loss=2.823, NarTop10Accuracy=0.761, over 5652.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.71, over 5643.53 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (1/8) Epoch 25, batch 700, train_loss[loss=2.708, NarTop10Accuracy=0.7881, over 5016.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7123, over 5709.97 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,135 INFO [trainer.py:765] (1/8) Epoch 25, batch 800, train_loss[loss=2.694, NarTop10Accuracy=0.7819, over 4224.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.71, over 5774.10 frames. ], batch size: 5, lr: 3.43e-03 +2024-08-06 19:58:00,678 INFO [trainer.py:765] (1/8) Epoch 25, batch 900, train_loss[loss=3.19, NarTop10Accuracy=0.6804, over 6588.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7107, over 5796.09 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:58:37,638 INFO [trainer.py:765] (1/8) Epoch 25, batch 1000, train_loss[loss=2.916, NarTop10Accuracy=0.7413, over 6729.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7073, over 5890.63 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:59:14,854 INFO [trainer.py:765] (1/8) Epoch 25, batch 1100, train_loss[loss=3.427, NarTop10Accuracy=0.635, over 6840.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 5939.78 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,038 INFO [trainer.py:765] (1/8) Epoch 25, batch 1200, train_loss[loss=3.434, NarTop10Accuracy=0.6298, over 7062.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 5940.64 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (1/8) Epoch 25, batch 1300, train_loss[loss=2.923, NarTop10Accuracy=0.7507, over 5193.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 6003.16 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,015 INFO [trainer.py:765] (1/8) Epoch 25, batch 1400, train_loss[loss=2.905, NarTop10Accuracy=0.7481, over 6207.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7087, over 6017.69 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,822 INFO [trainer.py:765] (1/8) Epoch 25, batch 1500, train_loss[loss=3.226, NarTop10Accuracy=0.6806, over 5853.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7078, over 5945.36 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,624 INFO [trainer.py:765] (1/8) Epoch 25, batch 1600, train_loss[loss=2.794, NarTop10Accuracy=0.767, over 7497.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7084, over 5938.76 frames. ], batch size: 23, lr: 3.41e-03 +2024-08-06 20:02:27,358 INFO [trainer.py:765] (1/8) Epoch 25, batch 1700, train_loss[loss=2.919, NarTop10Accuracy=0.736, over 6270.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7076, over 5923.81 frames. ], batch size: 13, lr: 3.41e-03 +2024-08-06 20:02:53,853 INFO [trainer.py:765] (1/8) Epoch 25, batch 1800, train_loss[loss=3.393, NarTop10Accuracy=0.6453, over 6903.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7053, over 5973.77 frames. 
], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,340 INFO [trainer.py:765] (1/8) Epoch 25, batch 1900, train_loss[loss=3.149, NarTop10Accuracy=0.701, over 6087.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.703, over 6008.05 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,934 INFO [trainer.py:765] (1/8) Epoch 25, batch 2000, train_loss[loss=3.441, NarTop10Accuracy=0.6421, over 6300.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7007, over 5986.77 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:04:11,245 INFO [trainer.py:765] (1/8) Epoch 25, batch 2100, train_loss[loss=2.728, NarTop10Accuracy=0.7843, over 3825.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7031, over 5962.89 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,408 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (1/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,513 INFO [trainer.py:765] (1/8) Epoch 25, batch 2200, train_loss[loss=3.221, NarTop10Accuracy=0.6766, over 6921.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7021, over 6023.20 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (1/8) Epoch 25, batch 2300, train_loss[loss=3.037, NarTop10Accuracy=0.7228, over 5676.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 6018.11 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (1/8) Epoch 25, batch 2400, train_loss[loss=2.842, NarTop10Accuracy=0.7537, over 5127.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7053, over 5796.04 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,846 INFO [trainer.py:765] (1/8) Epoch 25, batch 2500, train_loss[loss=2.85, NarTop10Accuracy=0.7566, over 5037.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7108, over 5495.33 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:18,019 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 20:07:19,303 INFO [trainer.py:765] (1/8) Epoch 26, batch 100, train_loss[loss=3.017, NarTop10Accuracy=0.7207, over 7428.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7051, over 2364.42 frames. ], batch size: 31, lr: 3.32e-03 +2024-08-06 20:07:52,381 INFO [trainer.py:765] (1/8) Epoch 26, batch 200, train_loss[loss=2.764, NarTop10Accuracy=0.7701, over 6828.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7069, over 3861.79 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,732 INFO [trainer.py:765] (1/8) Epoch 26, batch 300, train_loss[loss=2.942, NarTop10Accuracy=0.7344, over 6837.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.708, over 4660.03 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (1/8) Epoch 26, batch 400, train_loss[loss=3.077, NarTop10Accuracy=0.7163, over 5043.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 5087.19 frames. ], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,146 INFO [trainer.py:765] (1/8) Epoch 26, batch 500, train_loss[loss=2.809, NarTop10Accuracy=0.7651, over 6024.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 5369.65 frames. 
], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,890 INFO [trainer.py:765] (1/8) Epoch 26, batch 600, train_loss[loss=2.744, NarTop10Accuracy=0.7745, over 5748.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7115, over 5633.52 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,872 INFO [trainer.py:765] (1/8) Epoch 26, batch 700, train_loss[loss=3.022, NarTop10Accuracy=0.7263, over 5217.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.707, over 5710.08 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:19,060 INFO [trainer.py:765] (1/8) Epoch 26, batch 800, train_loss[loss=2.958, NarTop10Accuracy=0.7281, over 4347.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.707, over 5762.91 frames. ], batch size: 5, lr: 3.30e-03 +2024-08-06 20:11:49,314 INFO [trainer.py:765] (1/8) Epoch 26, batch 900, train_loss[loss=2.927, NarTop10Accuracy=0.7427, over 6777.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7079, over 5797.55 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:12:25,972 INFO [trainer.py:765] (1/8) Epoch 26, batch 1000, train_loss[loss=2.713, NarTop10Accuracy=0.7789, over 6609.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7072, over 5918.45 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:13:06,376 INFO [trainer.py:765] (1/8) Epoch 26, batch 1100, train_loss[loss=3.37, NarTop10Accuracy=0.6495, over 6744.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7059, over 5926.57 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,535 INFO [trainer.py:765] (1/8) Epoch 26, batch 1200, train_loss[loss=3.404, NarTop10Accuracy=0.6437, over 7311.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 5933.32 frames. ], batch size: 31, lr: 3.29e-03 +2024-08-06 20:14:13,694 INFO [trainer.py:765] (1/8) Epoch 26, batch 1300, train_loss[loss=2.861, NarTop10Accuracy=0.7464, over 5187.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7089, over 5991.39 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (1/8) Epoch 26, batch 1400, train_loss[loss=2.788, NarTop10Accuracy=0.7674, over 6060.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 6017.15 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,154 INFO [trainer.py:765] (1/8) Epoch 26, batch 1500, train_loss[loss=3.19, NarTop10Accuracy=0.6817, over 5979.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7083, over 5937.23 frames. ], batch size: 51, lr: 3.28e-03 +2024-08-06 20:15:48,978 INFO [trainer.py:765] (1/8) Epoch 26, batch 1600, train_loss[loss=3.035, NarTop10Accuracy=0.7225, over 7158.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.71, over 5921.08 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,001 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (1/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 20:15:58,239 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,951 INFO [trainer.py:765] (1/8) Epoch 26, batch 1700, train_loss[loss=3.19, NarTop10Accuracy=0.6956, over 6252.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7122, over 5921.63 frames. 
], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,426 INFO [trainer.py:765] (1/8) Epoch 26, batch 1800, train_loss[loss=2.76, NarTop10Accuracy=0.7709, over 7245.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7117, over 5990.87 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,839 INFO [trainer.py:765] (1/8) Epoch 26, batch 1900, train_loss[loss=3.022, NarTop10Accuracy=0.7265, over 6249.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7098, over 6019.41 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,379 INFO [trainer.py:765] (1/8) Epoch 26, batch 2000, train_loss[loss=3.582, NarTop10Accuracy=0.6093, over 6162.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7086, over 5996.24 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,563 INFO [trainer.py:765] (1/8) Epoch 26, batch 2100, train_loss[loss=3.133, NarTop10Accuracy=0.71, over 3900.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7058, over 5979.96 frames. ], batch size: 4, lr: 3.27e-03 +2024-08-06 20:18:32,776 INFO [trainer.py:765] (1/8) Epoch 26, batch 2200, train_loss[loss=2.943, NarTop10Accuracy=0.7337, over 7506.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7067, over 6004.32 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,897 INFO [trainer.py:765] (1/8) Epoch 26, batch 2300, train_loss[loss=3.165, NarTop10Accuracy=0.6968, over 5604.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7061, over 6019.15 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,205 INFO [trainer.py:765] (1/8) Epoch 26, batch 2400, train_loss[loss=2.718, NarTop10Accuracy=0.7829, over 5019.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.711, over 5773.32 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,651 INFO [trainer.py:765] (1/8) Epoch 26, batch 2500, train_loss[loss=2.731, NarTop10Accuracy=0.7799, over 5124.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7149, over 5477.79 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:06,240 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 20:21:04,873 INFO [trainer.py:765] (1/8) Epoch 27, batch 100, train_loss[loss=3.232, NarTop10Accuracy=0.6787, over 7218.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 2366.36 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (1/8) Epoch 27, batch 200, train_loss[loss=2.765, NarTop10Accuracy=0.7621, over 6684.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7084, over 3856.79 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,049 INFO [trainer.py:765] (1/8) Epoch 27, batch 300, train_loss[loss=2.893, NarTop10Accuracy=0.7462, over 7431.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7106, over 4652.56 frames. ], batch size: 23, lr: 3.18e-03 +2024-08-06 20:22:43,556 INFO [trainer.py:765] (1/8) Epoch 27, batch 400, train_loss[loss=2.898, NarTop10Accuracy=0.7441, over 5043.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7111, over 5109.12 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,083 INFO [trainer.py:765] (1/8) Epoch 27, batch 500, train_loss[loss=2.798, NarTop10Accuracy=0.7589, over 6042.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7145, over 5370.90 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,434 INFO [trainer.py:765] (1/8) Epoch 27, batch 600, train_loss[loss=3.244, NarTop10Accuracy=0.6771, over 5760.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7153, over 5646.03 frames. 
], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,975 INFO [trainer.py:765] (1/8) Epoch 27, batch 700, train_loss[loss=2.833, NarTop10Accuracy=0.7664, over 5091.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7151, over 5705.19 frames. ], batch size: 6, lr: 3.18e-03 +2024-08-06 20:25:03,407 INFO [trainer.py:765] (1/8) Epoch 27, batch 800, train_loss[loss=3.146, NarTop10Accuracy=0.6959, over 5142.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 5775.74 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (1/8) Epoch 27, batch 900, train_loss[loss=3.209, NarTop10Accuracy=0.6807, over 6120.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5797.37 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:10,096 INFO [trainer.py:765] (1/8) Epoch 27, batch 1000, train_loss[loss=2.721, NarTop10Accuracy=0.787, over 6681.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 5902.93 frames. ], batch size: 14, lr: 3.17e-03 +2024-08-06 20:26:18,313 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (1/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:26:26,878 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (1/8) Epoch 27, batch 1100, train_loss[loss=2.985, NarTop10Accuracy=0.7321, over 6867.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 5939.25 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (1/8) Epoch 27, batch 1200, train_loss[loss=2.813, NarTop10Accuracy=0.7674, over 7578.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 5934.02 frames. ], batch size: 32, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (1/8) Epoch 27, batch 1300, train_loss[loss=2.733, NarTop10Accuracy=0.7733, over 4305.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.712, over 5994.31 frames. ], batch size: 5, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (1/8) Epoch 27, batch 1400, train_loss[loss=3.16, NarTop10Accuracy=0.6856, over 6168.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 6023.75 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,633 INFO [trainer.py:765] (1/8) Epoch 27, batch 1500, train_loss[loss=3.08, NarTop10Accuracy=0.7068, over 5673.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5958.39 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (1/8) Epoch 27, batch 1600, train_loss[loss=2.917, NarTop10Accuracy=0.7447, over 7251.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.708, over 5935.62 frames. ], batch size: 23, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (1/8) Epoch 27, batch 1700, train_loss[loss=3.149, NarTop10Accuracy=0.6894, over 6324.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7092, over 5917.51 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (1/8) Epoch 27, batch 1800, train_loss[loss=3.496, NarTop10Accuracy=0.6237, over 7089.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 5977.84 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (1/8) Epoch 27, batch 1900, train_loss[loss=3.059, NarTop10Accuracy=0.7159, over 5550.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 6027.87 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (1/8) Epoch 27, batch 2000, train_loss[loss=3.059, NarTop10Accuracy=0.7229, over 5751.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5994.08 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,660 INFO [trainer.py:765] (1/8) Epoch 27, batch 2100, train_loss[loss=2.796, NarTop10Accuracy=0.7666, over 3843.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 5946.01 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (1/8) Epoch 27, batch 2200, train_loss[loss=3.378, NarTop10Accuracy=0.6448, over 7332.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 6003.89 frames. ], batch size: 32, lr: 3.14e-03 +2024-08-06 20:32:32,942 INFO [trainer.py:765] (1/8) Epoch 27, batch 2300, train_loss[loss=2.695, NarTop10Accuracy=0.7814, over 5745.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 6007.16 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (1/8) Epoch 27, batch 2400, train_loss[loss=2.76, NarTop10Accuracy=0.7708, over 5175.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7068, over 5769.21 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (1/8) Epoch 27, batch 2500, train_loss[loss=3.403, NarTop10Accuracy=0.6476, over 5133.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 5470.51 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,626 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 20:34:35,830 INFO [trainer.py:765] (1/8) Epoch 28, batch 100, train_loss[loss=2.793, NarTop10Accuracy=0.7599, over 7290.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 2363.08 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (1/8) Epoch 28, batch 200, train_loss[loss=2.816, NarTop10Accuracy=0.7642, over 6717.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.709, over 3865.88 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,422 INFO [trainer.py:765] (1/8) Epoch 28, batch 300, train_loss[loss=3.156, NarTop10Accuracy=0.6934, over 7011.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 4673.68 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,865 INFO [trainer.py:765] (1/8) Epoch 28, batch 400, train_loss[loss=3.197, NarTop10Accuracy=0.6757, over 5118.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5116.48 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,407 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (1/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. +2024-08-06 20:36:40,531 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,664 INFO [trainer.py:765] (1/8) Epoch 28, batch 500, train_loss[loss=3.25, NarTop10Accuracy=0.6723, over 6114.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7104, over 5406.14 frames. 
], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,463 INFO [trainer.py:765] (1/8) Epoch 28, batch 600, train_loss[loss=3.005, NarTop10Accuracy=0.719, over 5748.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7099, over 5663.72 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,891 INFO [trainer.py:765] (1/8) Epoch 28, batch 700, train_loss[loss=3.103, NarTop10Accuracy=0.7026, over 4938.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.709, over 5733.50 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,489 INFO [trainer.py:765] (1/8) Epoch 28, batch 800, train_loss[loss=2.913, NarTop10Accuracy=0.7445, over 5037.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7133, over 5801.39 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,507 INFO [trainer.py:765] (1/8) Epoch 28, batch 900, train_loss[loss=3.259, NarTop10Accuracy=0.6739, over 6153.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 5803.08 frames. ], batch size: 13, lr: 3.06e-03 +2024-08-06 20:39:53,241 INFO [trainer.py:765] (1/8) Epoch 28, batch 1000, train_loss[loss=3.139, NarTop10Accuracy=0.7096, over 6618.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.714, over 5909.67 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 20:40:25,868 INFO [trainer.py:765] (1/8) Epoch 28, batch 1100, train_loss[loss=2.772, NarTop10Accuracy=0.781, over 6804.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5955.71 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,419 INFO [trainer.py:765] (1/8) Epoch 28, batch 1200, train_loss[loss=3.186, NarTop10Accuracy=0.6822, over 7374.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.71, over 5962.09 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,681 INFO [trainer.py:765] (1/8) Epoch 28, batch 1300, train_loss[loss=3.107, NarTop10Accuracy=0.7032, over 4242.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7104, over 6009.63 frames. ], batch size: 5, lr: 3.05e-03 +2024-08-06 20:42:13,048 INFO [trainer.py:765] (1/8) Epoch 28, batch 1400, train_loss[loss=3.049, NarTop10Accuracy=0.7225, over 5970.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7085, over 6039.80 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (1/8) Epoch 28, batch 1500, train_loss[loss=3.411, NarTop10Accuracy=0.6474, over 6228.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7107, over 5950.63 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,081 INFO [trainer.py:765] (1/8) Epoch 28, batch 1600, train_loss[loss=2.918, NarTop10Accuracy=0.7464, over 7365.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.711, over 5944.98 frames. ], batch size: 23, lr: 3.04e-03 +2024-08-06 20:43:37,786 INFO [trainer.py:765] (1/8) Epoch 28, batch 1700, train_loss[loss=2.968, NarTop10Accuracy=0.7396, over 6294.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7097, over 5923.97 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 20:44:04,326 INFO [trainer.py:765] (1/8) Epoch 28, batch 1800, train_loss[loss=3.15, NarTop10Accuracy=0.6911, over 6960.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 5994.42 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,757 INFO [trainer.py:765] (1/8) Epoch 28, batch 1900, train_loss[loss=3.051, NarTop10Accuracy=0.7198, over 5928.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 6025.63 frames. 
], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,329 INFO [trainer.py:765] (1/8) Epoch 28, batch 2000, train_loss[loss=3.04, NarTop10Accuracy=0.721, over 5979.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7127, over 5987.64 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:45:21,651 INFO [trainer.py:765] (1/8) Epoch 28, batch 2100, train_loss[loss=2.983, NarTop10Accuracy=0.7398, over 3990.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7134, over 5988.67 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 20:45:47,077 INFO [trainer.py:765] (1/8) Epoch 28, batch 2200, train_loss[loss=2.893, NarTop10Accuracy=0.7515, over 7257.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7121, over 6014.21 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,308 INFO [trainer.py:765] (1/8) Epoch 28, batch 2300, train_loss[loss=3.527, NarTop10Accuracy=0.6145, over 5718.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7073, over 6025.74 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,807 INFO [trainer.py:765] (1/8) Epoch 28, batch 2400, train_loss[loss=2.931, NarTop10Accuracy=0.7389, over 5103.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7074, over 5777.83 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (1/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:46:57,081 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,292 INFO [trainer.py:765] (1/8) Epoch 28, batch 2500, train_loss[loss=3.093, NarTop10Accuracy=0.7008, over 5115.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7117, over 5470.36 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,310 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 20:48:21,052 INFO [trainer.py:765] (1/8) Epoch 29, batch 100, train_loss[loss=3.016, NarTop10Accuracy=0.7261, over 7410.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7096, over 2351.97 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,405 INFO [trainer.py:765] (1/8) Epoch 29, batch 200, train_loss[loss=3.125, NarTop10Accuracy=0.7001, over 6945.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7165, over 3838.68 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,476 INFO [trainer.py:765] (1/8) Epoch 29, batch 300, train_loss[loss=3.059, NarTop10Accuracy=0.7137, over 7059.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7181, over 4645.34 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,052 INFO [trainer.py:765] (1/8) Epoch 29, batch 400, train_loss[loss=3.373, NarTop10Accuracy=0.6456, over 5778.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7125, over 5110.65 frames. ], batch size: 8, lr: 2.96e-03 +2024-08-06 20:50:29,435 INFO [trainer.py:765] (1/8) Epoch 29, batch 500, train_loss[loss=3.174, NarTop10Accuracy=0.6909, over 6147.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 5401.59 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,023 INFO [trainer.py:765] (1/8) Epoch 29, batch 600, train_loss[loss=2.79, NarTop10Accuracy=0.7774, over 5664.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5641.89 frames. 
], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,677 INFO [trainer.py:765] (1/8) Epoch 29, batch 700, train_loss[loss=2.822, NarTop10Accuracy=0.7551, over 5001.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7099, over 5709.05 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,724 INFO [trainer.py:765] (1/8) Epoch 29, batch 800, train_loss[loss=2.679, NarTop10Accuracy=0.7864, over 4230.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7105, over 5754.37 frames. ], batch size: 5, lr: 2.95e-03 +2024-08-06 20:52:40,742 INFO [trainer.py:765] (1/8) Epoch 29, batch 900, train_loss[loss=2.813, NarTop10Accuracy=0.7644, over 6693.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7094, over 5790.76 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:16,861 INFO [trainer.py:765] (1/8) Epoch 29, batch 1000, train_loss[loss=3.434, NarTop10Accuracy=0.6325, over 6654.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7073, over 5895.51 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:52,902 INFO [trainer.py:765] (1/8) Epoch 29, batch 1100, train_loss[loss=3.037, NarTop10Accuracy=0.706, over 6696.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7067, over 5928.11 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,690 INFO [trainer.py:765] (1/8) Epoch 29, batch 1200, train_loss[loss=3.007, NarTop10Accuracy=0.7228, over 7437.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 5912.68 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,428 INFO [trainer.py:765] (1/8) Epoch 29, batch 1300, train_loss[loss=3.088, NarTop10Accuracy=0.7082, over 5055.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 5985.53 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (1/8) Epoch 29, batch 1400, train_loss[loss=3.294, NarTop10Accuracy=0.6515, over 6039.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7089, over 6016.89 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,359 INFO [trainer.py:765] (1/8) Epoch 29, batch 1500, train_loss[loss=3.367, NarTop10Accuracy=0.6511, over 6114.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5941.46 frames. ], batch size: 50, lr: 2.94e-03 +2024-08-06 20:56:32,040 INFO [trainer.py:765] (1/8) Epoch 29, batch 1600, train_loss[loss=3.35, NarTop10Accuracy=0.6634, over 6897.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 5935.42 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,639 INFO [trainer.py:765] (1/8) Epoch 29, batch 1700, train_loss[loss=2.859, NarTop10Accuracy=0.7583, over 6612.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7106, over 5928.18 frames. ], batch size: 14, lr: 2.93e-03 +2024-08-06 20:57:25,000 INFO [trainer.py:765] (1/8) Epoch 29, batch 1800, train_loss[loss=3.14, NarTop10Accuracy=0.6906, over 7020.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7118, over 5981.88 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,621 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (1/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. 
+2024-08-06 20:57:52,864 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (1/8) Epoch 29, batch 1900, train_loss[loss=2.99, NarTop10Accuracy=0.7323, over 6405.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7086, over 6030.95 frames. ], batch size: 52, lr: 2.93e-03 +2024-08-06 20:58:25,308 INFO [trainer.py:765] (1/8) Epoch 29, batch 2000, train_loss[loss=3.412, NarTop10Accuracy=0.6483, over 6471.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7086, over 6005.66 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:50,629 INFO [trainer.py:765] (1/8) Epoch 29, batch 2100, train_loss[loss=2.941, NarTop10Accuracy=0.7425, over 3921.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7082, over 5965.43 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 20:59:15,805 INFO [trainer.py:765] (1/8) Epoch 29, batch 2200, train_loss[loss=2.943, NarTop10Accuracy=0.7411, over 7131.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 6002.47 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (1/8) Epoch 29, batch 2300, train_loss[loss=2.899, NarTop10Accuracy=0.7417, over 5823.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.706, over 6023.72 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,155 INFO [trainer.py:765] (1/8) Epoch 29, batch 2400, train_loss[loss=2.693, NarTop10Accuracy=0.7854, over 5172.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7094, over 5804.51 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,741 INFO [trainer.py:765] (1/8) Epoch 29, batch 2500, train_loss[loss=3.316, NarTop10Accuracy=0.6561, over 5178.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 5501.74 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,843 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 21:01:41,717 INFO [trainer.py:765] (1/8) Epoch 30, batch 100, train_loss[loss=2.863, NarTop10Accuracy=0.747, over 7194.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.721, over 2365.50 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,014 INFO [trainer.py:765] (1/8) Epoch 30, batch 200, train_loss[loss=3.029, NarTop10Accuracy=0.7179, over 6816.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7229, over 3853.40 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,343 INFO [trainer.py:765] (1/8) Epoch 30, batch 300, train_loss[loss=3.072, NarTop10Accuracy=0.7135, over 6987.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7237, over 4656.20 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,643 INFO [trainer.py:765] (1/8) Epoch 30, batch 400, train_loss[loss=2.692, NarTop10Accuracy=0.7895, over 5085.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.721, over 5112.05 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,546 INFO [trainer.py:765] (1/8) Epoch 30, batch 500, train_loss[loss=3.161, NarTop10Accuracy=0.6833, over 5961.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5385.74 frames. ], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,657 INFO [trainer.py:765] (1/8) Epoch 30, batch 600, train_loss[loss=3.038, NarTop10Accuracy=0.7222, over 5751.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7173, over 5640.07 frames. 
], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,526 INFO [trainer.py:765] (1/8) Epoch 30, batch 700, train_loss[loss=3.027, NarTop10Accuracy=0.728, over 5085.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7197, over 5710.99 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,132 INFO [trainer.py:765] (1/8) Epoch 30, batch 800, train_loss[loss=2.914, NarTop10Accuracy=0.7315, over 4962.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7199, over 5753.61 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,844 INFO [trainer.py:765] (1/8) Epoch 30, batch 900, train_loss[loss=2.883, NarTop10Accuracy=0.7548, over 6579.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5772.61 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (1/8) Epoch 30, batch 1000, train_loss[loss=2.953, NarTop10Accuracy=0.7338, over 6741.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7129, over 5870.92 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:07:25,937 INFO [trainer.py:765] (1/8) Epoch 30, batch 1100, train_loss[loss=3.476, NarTop10Accuracy=0.6357, over 6942.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7106, over 5905.53 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,381 INFO [trainer.py:765] (1/8) Epoch 30, batch 1200, train_loss[loss=2.989, NarTop10Accuracy=0.7295, over 7317.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7122, over 5915.49 frames. ], batch size: 33, lr: 2.84e-03 +2024-08-06 21:08:35,371 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (1/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (1/8) Epoch 30, batch 1300, train_loss[loss=3.077, NarTop10Accuracy=0.7174, over 4239.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7125, over 5988.54 frames. ], batch size: 5, lr: 2.84e-03 +2024-08-06 21:09:22,397 INFO [trainer.py:765] (1/8) Epoch 30, batch 1400, train_loss[loss=2.89, NarTop10Accuracy=0.7472, over 6174.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7109, over 6024.67 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,373 INFO [trainer.py:765] (1/8) Epoch 30, batch 1500, train_loss[loss=3.036, NarTop10Accuracy=0.7172, over 6102.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7114, over 5959.65 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,084 INFO [trainer.py:765] (1/8) Epoch 30, batch 1600, train_loss[loss=2.993, NarTop10Accuracy=0.7313, over 6945.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7117, over 5945.51 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,680 INFO [trainer.py:765] (1/8) Epoch 30, batch 1700, train_loss[loss=3.203, NarTop10Accuracy=0.6805, over 6147.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7093, over 5908.89 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 21:11:13,059 INFO [trainer.py:765] (1/8) Epoch 30, batch 1800, train_loss[loss=3.403, NarTop10Accuracy=0.6375, over 7062.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7098, over 5979.15 frames. 
], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,419 INFO [trainer.py:765] (1/8) Epoch 30, batch 1900, train_loss[loss=3.049, NarTop10Accuracy=0.7167, over 6333.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7097, over 6032.95 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,826 INFO [trainer.py:765] (1/8) Epoch 30, batch 2000, train_loss[loss=3.375, NarTop10Accuracy=0.6458, over 6231.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 6009.01 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:30,088 INFO [trainer.py:765] (1/8) Epoch 30, batch 2100, train_loss[loss=2.919, NarTop10Accuracy=0.7476, over 4794.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7103, over 5986.02 frames. ], batch size: 5, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (1/8) Epoch 30, batch 2200, train_loss[loss=3.046, NarTop10Accuracy=0.7249, over 7359.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7111, over 6018.75 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,298 INFO [trainer.py:765] (1/8) Epoch 30, batch 2300, train_loss[loss=2.75, NarTop10Accuracy=0.7806, over 5688.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7073, over 6031.16 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,491 INFO [trainer.py:765] (1/8) Epoch 30, batch 2400, train_loss[loss=2.812, NarTop10Accuracy=0.7672, over 5097.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7145, over 5776.45 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,988 INFO [trainer.py:765] (1/8) Epoch 30, batch 2500, train_loss[loss=2.899, NarTop10Accuracy=0.7382, over 5070.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5486.72 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,907 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (1/8) Epoch 31, batch 100, train_loss[loss=3.531, NarTop10Accuracy=0.61, over 7197.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.713, over 2372.72 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,127 INFO [trainer.py:765] (1/8) Epoch 31, batch 200, train_loss[loss=2.941, NarTop10Accuracy=0.7436, over 6756.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.717, over 3857.65 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,215 INFO [trainer.py:765] (1/8) Epoch 31, batch 300, train_loss[loss=2.917, NarTop10Accuracy=0.7462, over 7092.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7175, over 4663.60 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,624 INFO [trainer.py:765] (1/8) Epoch 31, batch 400, train_loss[loss=3.268, NarTop10Accuracy=0.6745, over 5142.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7156, over 5107.84 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,724 INFO [trainer.py:765] (1/8) Epoch 31, batch 500, train_loss[loss=2.705, NarTop10Accuracy=0.7835, over 6180.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7179, over 5390.94 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,083 INFO [trainer.py:765] (1/8) Epoch 31, batch 600, train_loss[loss=2.812, NarTop10Accuracy=0.7733, over 5643.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7147, over 5668.89 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,610 INFO [trainer.py:765] (1/8) Epoch 31, batch 700, train_loss[loss=3.418, NarTop10Accuracy=0.632, over 4242.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7135, over 5725.38 frames. 
], batch size: 5, lr: 2.76e-03 +2024-08-06 21:18:51,095 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (1/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,277 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 21:18:59,985 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,246 INFO [trainer.py:765] (1/8) Epoch 31, batch 800, train_loss[loss=2.804, NarTop10Accuracy=0.7754, over 4290.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7161, over 5789.20 frames. ], batch size: 5, lr: 2.76e-03 +2024-08-06 21:19:56,951 INFO [trainer.py:765] (1/8) Epoch 31, batch 900, train_loss[loss=3.306, NarTop10Accuracy=0.6609, over 6273.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7166, over 5799.88 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,311 INFO [trainer.py:765] (1/8) Epoch 31, batch 1000, train_loss[loss=3.327, NarTop10Accuracy=0.649, over 6264.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 5889.69 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,216 INFO [trainer.py:765] (1/8) Epoch 31, batch 1100, train_loss[loss=3.268, NarTop10Accuracy=0.6685, over 7131.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.716, over 5921.24 frames. ], batch size: 18, lr: 2.75e-03 +2024-08-06 21:21:41,120 INFO [trainer.py:765] (1/8) Epoch 31, batch 1200, train_loss[loss=3.002, NarTop10Accuracy=0.7267, over 7077.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 5927.86 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,742 INFO [trainer.py:765] (1/8) Epoch 31, batch 1300, train_loss[loss=2.976, NarTop10Accuracy=0.7312, over 5040.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7143, over 6003.52 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 21:22:53,534 INFO [trainer.py:765] (1/8) Epoch 31, batch 1400, train_loss[loss=2.959, NarTop10Accuracy=0.7401, over 6069.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7124, over 6020.16 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,270 INFO [trainer.py:765] (1/8) Epoch 31, batch 1500, train_loss[loss=3.226, NarTop10Accuracy=0.6751, over 6294.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5938.40 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:23:49,005 INFO [trainer.py:765] (1/8) Epoch 31, batch 1600, train_loss[loss=3.309, NarTop10Accuracy=0.671, over 7026.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 5927.50 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,512 INFO [trainer.py:765] (1/8) Epoch 31, batch 1700, train_loss[loss=3.379, NarTop10Accuracy=0.6475, over 6276.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7151, over 5922.32 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,996 INFO [trainer.py:765] (1/8) Epoch 31, batch 1800, train_loss[loss=2.769, NarTop10Accuracy=0.7694, over 7038.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7168, over 5980.70 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,357 INFO [trainer.py:765] (1/8) Epoch 31, batch 1900, train_loss[loss=3.292, NarTop10Accuracy=0.6711, over 6291.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7129, over 6015.76 frames. 
], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:33,773 INFO [trainer.py:765] (1/8) Epoch 31, batch 2000, train_loss[loss=3.05, NarTop10Accuracy=0.7222, over 6060.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7136, over 5987.22 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:59,107 INFO [trainer.py:765] (1/8) Epoch 31, batch 2100, train_loss[loss=2.685, NarTop10Accuracy=0.7822, over 3903.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7146, over 5959.52 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (1/8) Epoch 31, batch 2200, train_loss[loss=3.009, NarTop10Accuracy=0.7215, over 7503.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7181, over 5986.93 frames. ], batch size: 32, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (1/8) Epoch 31, batch 2300, train_loss[loss=2.766, NarTop10Accuracy=0.7748, over 5637.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7146, over 6002.60 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,608 INFO [trainer.py:765] (1/8) Epoch 31, batch 2400, train_loss[loss=2.835, NarTop10Accuracy=0.7563, over 5169.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.716, over 5759.94 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,028 INFO [trainer.py:765] (1/8) Epoch 31, batch 2500, train_loss[loss=2.88, NarTop10Accuracy=0.741, over 5766.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5488.49 frames. ], batch size: 8, lr: 2.73e-03 +2024-08-06 21:27:57,345 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 21:28:49,392 INFO [trainer.py:765] (1/8) Epoch 32, batch 100, train_loss[loss=2.843, NarTop10Accuracy=0.7575, over 7167.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7143, over 2342.23 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,160 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (1/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29627MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,273 INFO [trainer.py:765] (1/8) Epoch 32, batch 200, train_loss[loss=3.234, NarTop10Accuracy=0.6721, over 6909.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.714, over 3830.65 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,279 INFO [trainer.py:765] (1/8) Epoch 32, batch 300, train_loss[loss=3.115, NarTop10Accuracy=0.7094, over 7050.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 4652.07 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (1/8) Epoch 32, batch 400, train_loss[loss=2.793, NarTop10Accuracy=0.7577, over 5202.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.712, over 5107.57 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,530 INFO [trainer.py:765] (1/8) Epoch 32, batch 500, train_loss[loss=2.959, NarTop10Accuracy=0.7305, over 6072.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7128, over 5385.92 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,487 INFO [trainer.py:765] (1/8) Epoch 32, batch 600, train_loss[loss=3.297, NarTop10Accuracy=0.6635, over 5661.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5653.09 frames. 
], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (1/8) Epoch 32, batch 700, train_loss[loss=2.622, NarTop10Accuracy=0.8059, over 5178.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7164, over 5723.50 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,647 INFO [trainer.py:765] (1/8) Epoch 32, batch 800, train_loss[loss=3.13, NarTop10Accuracy=0.6861, over 4257.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 5774.54 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:28,992 INFO [trainer.py:765] (1/8) Epoch 32, batch 900, train_loss[loss=2.749, NarTop10Accuracy=0.7818, over 6702.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 5804.66 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:04,050 INFO [trainer.py:765] (1/8) Epoch 32, batch 1000, train_loss[loss=3.311, NarTop10Accuracy=0.6666, over 6651.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 5908.19 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (1/8) Epoch 32, batch 1100, train_loss[loss=3.127, NarTop10Accuracy=0.6957, over 6861.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5932.45 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (1/8) Epoch 32, batch 1200, train_loss[loss=3.267, NarTop10Accuracy=0.6681, over 7449.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7134, over 5940.49 frames. ], batch size: 32, lr: 2.66e-03 +2024-08-06 21:35:52,802 INFO [trainer.py:765] (1/8) Epoch 32, batch 1300, train_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5088.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7131, over 6000.88 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,479 INFO [trainer.py:765] (1/8) Epoch 32, batch 1400, train_loss[loss=3.37, NarTop10Accuracy=0.6451, over 6084.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.713, over 6003.27 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,734 INFO [trainer.py:765] (1/8) Epoch 32, batch 1500, train_loss[loss=3.439, NarTop10Accuracy=0.6385, over 5769.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5933.33 frames. ], batch size: 51, lr: 2.66e-03 +2024-08-06 21:37:32,522 INFO [trainer.py:765] (1/8) Epoch 32, batch 1600, train_loss[loss=2.989, NarTop10Accuracy=0.7298, over 7230.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5929.27 frames. ], batch size: 23, lr: 2.66e-03 +2024-08-06 21:37:59,161 INFO [trainer.py:765] (1/8) Epoch 32, batch 1700, train_loss[loss=3.098, NarTop10Accuracy=0.7112, over 6705.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7148, over 5916.64 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 21:38:25,702 INFO [trainer.py:765] (1/8) Epoch 32, batch 1800, train_loss[loss=3.136, NarTop10Accuracy=0.6958, over 7065.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5982.39 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,169 INFO [trainer.py:765] (1/8) Epoch 32, batch 1900, train_loss[loss=2.992, NarTop10Accuracy=0.7239, over 6120.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7106, over 6028.11 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (1/8) Epoch 32, batch 2000, train_loss[loss=3.439, NarTop10Accuracy=0.6379, over 5517.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5992.15 frames. 
], batch size: 51, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (1/8) Epoch 32, batch 2100, train_loss[loss=2.584, NarTop10Accuracy=0.8135, over 4911.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7154, over 5982.38 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 21:39:54,782 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (1/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,628 INFO [trainer.py:765] (1/8) Epoch 32, batch 2200, train_loss[loss=3.113, NarTop10Accuracy=0.7031, over 7389.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 6010.75 frames. ], batch size: 32, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (1/8) Epoch 32, batch 2300, train_loss[loss=3.383, NarTop10Accuracy=0.6527, over 5736.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7107, over 6019.40 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (1/8) Epoch 32, batch 2400, train_loss[loss=3.375, NarTop10Accuracy=0.6587, over 5103.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7144, over 5775.44 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,537 INFO [trainer.py:765] (1/8) Epoch 32, batch 2500, train_loss[loss=2.725, NarTop10Accuracy=0.7899, over 5154.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7211, over 5479.79 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,800 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 21:42:47,615 INFO [trainer.py:765] (1/8) Epoch 33, batch 100, train_loss[loss=3.158, NarTop10Accuracy=0.694, over 6972.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7218, over 2374.33 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (1/8) Epoch 33, batch 200, train_loss[loss=2.782, NarTop10Accuracy=0.7654, over 6738.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7198, over 3872.61 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (1/8) Epoch 33, batch 300, train_loss[loss=3.383, NarTop10Accuracy=0.648, over 7056.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7183, over 4670.50 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,315 INFO [trainer.py:765] (1/8) Epoch 33, batch 400, train_loss[loss=2.803, NarTop10Accuracy=0.7642, over 5124.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5123.61 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (1/8) Epoch 33, batch 500, train_loss[loss=2.752, NarTop10Accuracy=0.776, over 5991.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 5384.11 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (1/8) Epoch 33, batch 600, train_loss[loss=3.395, NarTop10Accuracy=0.6427, over 5802.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7151, over 5635.83 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,316 INFO [trainer.py:765] (1/8) Epoch 33, batch 700, train_loss[loss=2.844, NarTop10Accuracy=0.7563, over 4989.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7149, over 5716.88 frames. 
], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (1/8) Epoch 33, batch 800, train_loss[loss=2.704, NarTop10Accuracy=0.7778, over 5085.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5763.28 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (1/8) Epoch 33, batch 900, train_loss[loss=3.197, NarTop10Accuracy=0.6866, over 6681.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5780.59 frames. ], batch size: 14, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (1/8) Epoch 33, batch 1000, train_loss[loss=3.032, NarTop10Accuracy=0.7224, over 6117.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5901.28 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (1/8) Epoch 33, batch 1100, train_loss[loss=2.782, NarTop10Accuracy=0.7746, over 6822.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7106, over 5923.68 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,659 INFO [trainer.py:765] (1/8) Epoch 33, batch 1200, train_loss[loss=2.875, NarTop10Accuracy=0.7559, over 7359.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7111, over 5911.66 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,815 INFO [trainer.py:765] (1/8) Epoch 33, batch 1300, train_loss[loss=2.983, NarTop10Accuracy=0.7321, over 5112.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7119, over 5972.87 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (1/8) Epoch 33, batch 1400, train_loss[loss=3.199, NarTop10Accuracy=0.6784, over 6141.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7103, over 5997.27 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (1/8) Epoch 33, batch 1500, train_loss[loss=3.076, NarTop10Accuracy=0.7109, over 6231.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7133, over 5944.18 frames. ], batch size: 51, lr: 2.58e-03 +2024-08-06 21:51:04,606 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (1/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (1/8) Epoch 33, batch 1600, train_loss[loss=3.163, NarTop10Accuracy=0.6932, over 7086.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5924.12 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,922 INFO [trainer.py:765] (1/8) Epoch 33, batch 1700, train_loss[loss=2.855, NarTop10Accuracy=0.7612, over 6135.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7145, over 5924.06 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (1/8) Epoch 33, batch 1800, train_loss[loss=2.838, NarTop10Accuracy=0.7573, over 7200.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.716, over 5998.44 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (1/8) Epoch 33, batch 1900, train_loss[loss=3.408, NarTop10Accuracy=0.6435, over 5955.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7121, over 6042.75 frames. 
], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (1/8) Epoch 33, batch 2000, train_loss[loss=3.44, NarTop10Accuracy=0.6413, over 6015.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 6004.14 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,658 INFO [trainer.py:765] (1/8) Epoch 33, batch 2100, train_loss[loss=3.297, NarTop10Accuracy=0.6633, over 3891.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5975.34 frames. ], batch size: 4, lr: 2.57e-03 +2024-08-06 21:53:56,890 INFO [trainer.py:765] (1/8) Epoch 33, batch 2200, train_loss[loss=3.448, NarTop10Accuracy=0.6313, over 7080.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 6000.95 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (1/8) Epoch 33, batch 2300, train_loss[loss=2.841, NarTop10Accuracy=0.7688, over 5796.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7148, over 6015.26 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,430 INFO [trainer.py:765] (1/8) Epoch 33, batch 2400, train_loss[loss=2.728, NarTop10Accuracy=0.7779, over 5190.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5774.43 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (1/8) Epoch 33, batch 2500, train_loss[loss=2.772, NarTop10Accuracy=0.7841, over 5142.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7209, over 5465.95 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,641 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (1/8) Epoch 34, batch 100, train_loss[loss=3.414, NarTop10Accuracy=0.6407, over 7278.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7194, over 2368.66 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (1/8) Epoch 34, batch 200, train_loss[loss=3.149, NarTop10Accuracy=0.693, over 7053.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7229, over 3861.51 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (1/8) Epoch 34, batch 300, train_loss[loss=2.919, NarTop10Accuracy=0.7412, over 7077.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7201, over 4649.06 frames. ], batch size: 22, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (1/8) Epoch 34, batch 400, train_loss[loss=3.109, NarTop10Accuracy=0.7002, over 5007.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7236, over 5114.74 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,690 INFO [trainer.py:765] (1/8) Epoch 34, batch 500, train_loss[loss=3.215, NarTop10Accuracy=0.6779, over 6600.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 5389.13 frames. ], batch size: 12, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (1/8) Epoch 34, batch 600, train_loss[loss=2.923, NarTop10Accuracy=0.7348, over 5802.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7186, over 5647.21 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (1/8) Epoch 34, batch 700, train_loss[loss=3.018, NarTop10Accuracy=0.7168, over 5199.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5733.83 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (1/8) Epoch 34, batch 800, train_loss[loss=2.974, NarTop10Accuracy=0.7305, over 5034.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 5786.69 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (1/8) Epoch 34, batch 900, train_loss[loss=2.929, NarTop10Accuracy=0.7442, over 6723.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 5796.92 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (1/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:01:34,091 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (1/8) Epoch 34, batch 1000, train_loss[loss=3.354, NarTop10Accuracy=0.655, over 6207.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 5896.67 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (1/8) Epoch 34, batch 1100, train_loss[loss=3.254, NarTop10Accuracy=0.671, over 6882.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7161, over 5939.47 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (1/8) Epoch 34, batch 1200, train_loss[loss=2.776, NarTop10Accuracy=0.7676, over 7290.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7172, over 5940.91 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,813 INFO [trainer.py:765] (1/8) Epoch 34, batch 1300, train_loss[loss=2.827, NarTop10Accuracy=0.7615, over 4299.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7166, over 5984.44 frames. ], batch size: 5, lr: 2.50e-03 +2024-08-06 22:03:52,949 INFO [trainer.py:765] (1/8) Epoch 34, batch 1400, train_loss[loss=3.218, NarTop10Accuracy=0.673, over 6531.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5996.67 frames. ], batch size: 12, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (1/8) Epoch 34, batch 1500, train_loss[loss=3.028, NarTop10Accuracy=0.7248, over 5766.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5932.28 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,599 INFO [trainer.py:765] (1/8) Epoch 34, batch 1600, train_loss[loss=2.889, NarTop10Accuracy=0.7512, over 6912.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5920.24 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (1/8) Epoch 34, batch 1700, train_loss[loss=3.141, NarTop10Accuracy=0.7046, over 6132.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7166, over 5917.76 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 22:05:41,720 INFO [trainer.py:765] (1/8) Epoch 34, batch 1800, train_loss[loss=3.417, NarTop10Accuracy=0.6488, over 6744.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7167, over 5993.45 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,206 INFO [trainer.py:765] (1/8) Epoch 34, batch 1900, train_loss[loss=3.019, NarTop10Accuracy=0.721, over 6213.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7128, over 6033.37 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:33,769 INFO [trainer.py:765] (1/8) Epoch 34, batch 2000, train_loss[loss=2.979, NarTop10Accuracy=0.7383, over 6504.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7155, over 5991.79 frames. 
], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (1/8) Epoch 34, batch 2100, train_loss[loss=3.309, NarTop10Accuracy=0.6634, over 4821.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5948.76 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (1/8) Epoch 34, batch 2200, train_loss[loss=2.996, NarTop10Accuracy=0.7294, over 7296.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7135, over 5984.42 frames. ], batch size: 32, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (1/8) Epoch 34, batch 2300, train_loss[loss=2.872, NarTop10Accuracy=0.76, over 5766.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 6011.30 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (1/8) Epoch 34, batch 2400, train_loss[loss=3.357, NarTop10Accuracy=0.6603, over 5178.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7146, over 5777.19 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (1/8) Epoch 34, batch 2500, train_loss[loss=2.688, NarTop10Accuracy=0.787, over 5214.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 5467.06 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,272 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (1/8) Epoch 35, batch 100, train_loss[loss=3.017, NarTop10Accuracy=0.7252, over 7185.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7177, over 2386.35 frames. ], batch size: 31, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (1/8) Epoch 35, batch 200, train_loss[loss=3.17, NarTop10Accuracy=0.692, over 6816.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7144, over 3868.92 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (1/8) Epoch 35, batch 300, train_loss[loss=2.877, NarTop10Accuracy=0.758, over 7008.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 4661.35 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (1/8) Epoch 35, batch 400, train_loss[loss=2.877, NarTop10Accuracy=0.7516, over 5259.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7201, over 5108.22 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,047 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (1/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:11:48,701 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,722 INFO [trainer.py:765] (1/8) Epoch 35, batch 500, train_loss[loss=2.7, NarTop10Accuracy=0.7897, over 6111.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7212, over 5403.17 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,424 INFO [trainer.py:765] (1/8) Epoch 35, batch 600, train_loss[loss=3.299, NarTop10Accuracy=0.6689, over 5679.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7182, over 5664.54 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (1/8) Epoch 35, batch 700, train_loss[loss=2.711, NarTop10Accuracy=0.7898, over 4947.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 5732.83 frames. 
], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,383 INFO [trainer.py:765] (1/8) Epoch 35, batch 800, train_loss[loss=2.649, NarTop10Accuracy=0.7961, over 5229.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5779.22 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (1/8) Epoch 35, batch 900, train_loss[loss=3.161, NarTop10Accuracy=0.6825, over 6249.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 5808.41 frames. ], batch size: 13, lr: 2.44e-03 +2024-08-06 22:15:09,371 INFO [trainer.py:765] (1/8) Epoch 35, batch 1000, train_loss[loss=2.916, NarTop10Accuracy=0.7417, over 6324.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7186, over 5917.06 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,494 INFO [trainer.py:765] (1/8) Epoch 35, batch 1100, train_loss[loss=3.05, NarTop10Accuracy=0.712, over 6765.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 5932.42 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,483 INFO [trainer.py:765] (1/8) Epoch 35, batch 1200, train_loss[loss=2.979, NarTop10Accuracy=0.7341, over 7287.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7195, over 5929.48 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (1/8) Epoch 35, batch 1300, train_loss[loss=2.863, NarTop10Accuracy=0.754, over 4992.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7214, over 5984.46 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,060 INFO [trainer.py:765] (1/8) Epoch 35, batch 1400, train_loss[loss=3.082, NarTop10Accuracy=0.6973, over 6072.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7197, over 6011.17 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (1/8) Epoch 35, batch 1500, train_loss[loss=3.047, NarTop10Accuracy=0.719, over 6441.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5968.52 frames. ], batch size: 53, lr: 2.43e-03 +2024-08-06 22:18:30,727 INFO [trainer.py:765] (1/8) Epoch 35, batch 1600, train_loss[loss=3, NarTop10Accuracy=0.7341, over 7080.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7157, over 5939.21 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,319 INFO [trainer.py:765] (1/8) Epoch 35, batch 1700, train_loss[loss=2.87, NarTop10Accuracy=0.7589, over 6207.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7147, over 5930.11 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,702 INFO [trainer.py:765] (1/8) Epoch 35, batch 1800, train_loss[loss=3.417, NarTop10Accuracy=0.6379, over 7047.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5989.33 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (1/8) Epoch 35, batch 1900, train_loss[loss=3.176, NarTop10Accuracy=0.6895, over 5655.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 6026.24 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,761 INFO [trainer.py:765] (1/8) Epoch 35, batch 2000, train_loss[loss=3.065, NarTop10Accuracy=0.7166, over 6540.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7167, over 5996.93 frames. ], batch size: 51, lr: 2.42e-03 +2024-08-06 22:20:41,044 INFO [trainer.py:765] (1/8) Epoch 35, batch 2100, train_loss[loss=2.746, NarTop10Accuracy=0.7786, over 3894.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.716, over 5984.78 frames. 
], batch size: 4, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (1/8) Epoch 35, batch 2200, train_loss[loss=2.859, NarTop10Accuracy=0.7511, over 7359.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7149, over 6003.72 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,285 INFO [trainer.py:765] (1/8) Epoch 35, batch 2300, train_loss[loss=2.99, NarTop10Accuracy=0.7205, over 5850.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7134, over 6014.74 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,647 INFO [trainer.py:765] (1/8) Epoch 35, batch 2400, train_loss[loss=3.293, NarTop10Accuracy=0.6765, over 5664.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7145, over 5770.25 frames. ], batch size: 8, lr: 2.42e-03 +2024-08-06 22:21:59,680 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (1/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,128 INFO [trainer.py:765] (1/8) Epoch 35, batch 2500, train_loss[loss=2.966, NarTop10Accuracy=0.7238, over 5043.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7186, over 5468.43 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:47,022 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 22:23:47,171 INFO [trainer.py:765] (1/8) Epoch 36, batch 100, train_loss[loss=3.163, NarTop10Accuracy=0.6913, over 7287.00 frames. ], tot_loss[loss=2.989, NarTop10Accuracy=0.7282, over 2368.39 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (1/8) Epoch 36, batch 200, train_loss[loss=2.765, NarTop10Accuracy=0.7785, over 6660.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7221, over 3858.56 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,720 INFO [trainer.py:765] (1/8) Epoch 36, batch 300, train_loss[loss=3.212, NarTop10Accuracy=0.6889, over 6996.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7189, over 4658.35 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,275 INFO [trainer.py:765] (1/8) Epoch 36, batch 400, train_loss[loss=2.873, NarTop10Accuracy=0.7535, over 5103.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.722, over 5128.39 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (1/8) Epoch 36, batch 500, train_loss[loss=3.301, NarTop10Accuracy=0.6548, over 6195.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7213, over 5402.84 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,025 INFO [trainer.py:765] (1/8) Epoch 36, batch 600, train_loss[loss=2.92, NarTop10Accuracy=0.7441, over 5706.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.722, over 5666.60 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,990 INFO [trainer.py:765] (1/8) Epoch 36, batch 700, train_loss[loss=3.18, NarTop10Accuracy=0.6918, over 5097.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5720.92 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,914 INFO [trainer.py:765] (1/8) Epoch 36, batch 800, train_loss[loss=3.187, NarTop10Accuracy=0.6901, over 4278.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7172, over 5771.46 frames. 
], batch size: 5, lr: 2.37e-03 +2024-08-06 22:28:17,811 INFO [trainer.py:765] (1/8) Epoch 36, batch 900, train_loss[loss=2.84, NarTop10Accuracy=0.7544, over 6201.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7201, over 5802.23 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:28:56,983 INFO [trainer.py:765] (1/8) Epoch 36, batch 1000, train_loss[loss=3.294, NarTop10Accuracy=0.6547, over 6714.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 5889.04 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (1/8) Epoch 36, batch 1100, train_loss[loss=2.855, NarTop10Accuracy=0.7572, over 7167.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7196, over 5949.02 frames. ], batch size: 18, lr: 2.36e-03 +2024-08-06 22:30:05,680 INFO [trainer.py:765] (1/8) Epoch 36, batch 1200, train_loss[loss=3.055, NarTop10Accuracy=0.7109, over 7419.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7204, over 5933.11 frames. ], batch size: 32, lr: 2.36e-03 +2024-08-06 22:30:42,575 INFO [trainer.py:765] (1/8) Epoch 36, batch 1300, train_loss[loss=2.857, NarTop10Accuracy=0.7486, over 5010.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7184, over 5991.17 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (1/8) Epoch 36, batch 1400, train_loss[loss=3.118, NarTop10Accuracy=0.7092, over 6147.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7211, over 5999.12 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (1/8) Epoch 36, batch 1500, train_loss[loss=3.427, NarTop10Accuracy=0.6409, over 6435.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7201, over 5959.06 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 22:32:11,459 INFO [trainer.py:765] (1/8) Epoch 36, batch 1600, train_loss[loss=3.409, NarTop10Accuracy=0.6425, over 7299.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7196, over 5937.19 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,108 INFO [trainer.py:765] (1/8) Epoch 36, batch 1700, train_loss[loss=3.401, NarTop10Accuracy=0.6473, over 6120.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.717, over 5928.55 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (1/8) Epoch 36, batch 1800, train_loss[loss=3.178, NarTop10Accuracy=0.6892, over 7308.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 6003.88 frames. ], batch size: 23, lr: 2.36e-03 +2024-08-06 22:33:15,169 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (1/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (1/8) Epoch 36, batch 1900, train_loss[loss=2.947, NarTop10Accuracy=0.7387, over 6345.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 6047.23 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (1/8) Epoch 36, batch 2000, train_loss[loss=3.138, NarTop10Accuracy=0.7048, over 6033.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7183, over 6003.53 frames. 
], batch size: 51, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (1/8) Epoch 36, batch 2100, train_loss[loss=2.645, NarTop10Accuracy=0.7958, over 4890.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7195, over 5969.45 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,939 INFO [trainer.py:765] (1/8) Epoch 36, batch 2200, train_loss[loss=3.466, NarTop10Accuracy=0.6293, over 7311.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 6005.98 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (1/8) Epoch 36, batch 2300, train_loss[loss=3.494, NarTop10Accuracy=0.6161, over 5643.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7143, over 6021.92 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,601 INFO [trainer.py:765] (1/8) Epoch 36, batch 2400, train_loss[loss=3.03, NarTop10Accuracy=0.6994, over 5124.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7173, over 5779.77 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (1/8) Epoch 36, batch 2500, train_loss[loss=2.782, NarTop10Accuracy=0.757, over 5283.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7201, over 5491.16 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:28,928 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 22:37:29,725 INFO [trainer.py:765] (1/8) Epoch 37, batch 100, train_loss[loss=2.889, NarTop10Accuracy=0.7457, over 7434.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 2372.27 frames. ], batch size: 32, lr: 2.31e-03 +2024-08-06 22:38:01,272 INFO [trainer.py:765] (1/8) Epoch 37, batch 200, train_loss[loss=2.828, NarTop10Accuracy=0.7692, over 6864.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7188, over 3856.27 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (1/8) Epoch 37, batch 300, train_loss[loss=3.165, NarTop10Accuracy=0.6978, over 7137.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7209, over 4653.78 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,307 INFO [trainer.py:765] (1/8) Epoch 37, batch 400, train_loss[loss=2.631, NarTop10Accuracy=0.8044, over 5199.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7227, over 5111.60 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,861 INFO [trainer.py:765] (1/8) Epoch 37, batch 500, train_loss[loss=3.299, NarTop10Accuracy=0.6695, over 6084.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7228, over 5402.65 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,333 INFO [trainer.py:765] (1/8) Epoch 37, batch 600, train_loss[loss=2.762, NarTop10Accuracy=0.7777, over 5745.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 5673.63 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,615 INFO [trainer.py:765] (1/8) Epoch 37, batch 700, train_loss[loss=3.263, NarTop10Accuracy=0.6702, over 5277.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5736.42 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:30,564 INFO [trainer.py:765] (1/8) Epoch 37, batch 800, train_loss[loss=2.799, NarTop10Accuracy=0.7656, over 4947.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5789.08 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,083 INFO [trainer.py:765] (1/8) Epoch 37, batch 900, train_loss[loss=2.854, NarTop10Accuracy=0.7586, over 6243.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7205, over 5808.77 frames. 
], batch size: 13, lr: 2.30e-03 +2024-08-06 22:42:38,267 INFO [trainer.py:765] (1/8) Epoch 37, batch 1000, train_loss[loss=3.155, NarTop10Accuracy=0.6893, over 6231.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7175, over 5913.32 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (1/8) Epoch 37, batch 1100, train_loss[loss=3.046, NarTop10Accuracy=0.7221, over 6858.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7175, over 5948.12 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (1/8) Epoch 37, batch 1200, train_loss[loss=2.929, NarTop10Accuracy=0.7468, over 7137.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7167, over 5940.17 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,754 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (1/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,784 INFO [trainer.py:765] (1/8) Epoch 37, batch 1300, train_loss[loss=2.83, NarTop10Accuracy=0.7631, over 5073.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7189, over 5994.57 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (1/8) Epoch 37, batch 1400, train_loss[loss=2.72, NarTop10Accuracy=0.7787, over 6150.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7193, over 6019.87 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,512 INFO [trainer.py:765] (1/8) Epoch 37, batch 1500, train_loss[loss=2.893, NarTop10Accuracy=0.746, over 6375.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5946.75 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,438 INFO [trainer.py:765] (1/8) Epoch 37, batch 1600, train_loss[loss=3.387, NarTop10Accuracy=0.6486, over 7110.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 5919.61 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,187 INFO [trainer.py:765] (1/8) Epoch 37, batch 1700, train_loss[loss=3.37, NarTop10Accuracy=0.6437, over 6828.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5896.41 frames. ], batch size: 14, lr: 2.29e-03 +2024-08-06 22:47:01,793 INFO [trainer.py:765] (1/8) Epoch 37, batch 1800, train_loss[loss=2.787, NarTop10Accuracy=0.779, over 7182.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 5974.21 frames. ], batch size: 23, lr: 2.29e-03 +2024-08-06 22:47:28,312 INFO [trainer.py:765] (1/8) Epoch 37, batch 1900, train_loss[loss=3.077, NarTop10Accuracy=0.7134, over 6084.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 6031.91 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (1/8) Epoch 37, batch 2000, train_loss[loss=3.3, NarTop10Accuracy=0.6694, over 6489.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 5999.71 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,326 INFO [trainer.py:765] (1/8) Epoch 37, batch 2100, train_loss[loss=2.951, NarTop10Accuracy=0.7315, over 3876.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 5980.42 frames. 
], batch size: 4, lr: 2.29e-03 +2024-08-06 22:48:44,707 INFO [trainer.py:765] (1/8) Epoch 37, batch 2200, train_loss[loss=2.965, NarTop10Accuracy=0.7388, over 7395.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 6003.23 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,913 INFO [trainer.py:765] (1/8) Epoch 37, batch 2300, train_loss[loss=2.64, NarTop10Accuracy=0.7994, over 5718.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 6018.26 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,318 INFO [trainer.py:765] (1/8) Epoch 37, batch 2400, train_loss[loss=3.332, NarTop10Accuracy=0.657, over 4998.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7207, over 5787.32 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,861 INFO [trainer.py:765] (1/8) Epoch 37, batch 2500, train_loss[loss=3.371, NarTop10Accuracy=0.6483, over 5109.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.727, over 5471.62 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:18,265 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 22:51:16,152 INFO [trainer.py:765] (1/8) Epoch 38, batch 100, train_loss[loss=3.099, NarTop10Accuracy=0.7136, over 7227.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7244, over 2358.38 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,015 INFO [trainer.py:765] (1/8) Epoch 38, batch 200, train_loss[loss=3.296, NarTop10Accuracy=0.6639, over 6876.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7229, over 3855.26 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,203 INFO [trainer.py:765] (1/8) Epoch 38, batch 300, train_loss[loss=2.878, NarTop10Accuracy=0.7447, over 7266.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7187, over 4655.09 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (1/8) Epoch 38, batch 400, train_loss[loss=3.13, NarTop10Accuracy=0.6933, over 4992.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7214, over 5122.48 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,229 INFO [trainer.py:765] (1/8) Epoch 38, batch 500, train_loss[loss=2.714, NarTop10Accuracy=0.7871, over 6258.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7275, over 5405.28 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,498 INFO [trainer.py:765] (1/8) Epoch 38, batch 600, train_loss[loss=3.346, NarTop10Accuracy=0.6578, over 5619.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7253, over 5665.62 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,003 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (1/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,658 INFO [trainer.py:765] (1/8) Epoch 38, batch 700, train_loss[loss=2.73, NarTop10Accuracy=0.7727, over 4989.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.724, over 5723.28 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,937 INFO [trainer.py:765] (1/8) Epoch 38, batch 800, train_loss[loss=3.183, NarTop10Accuracy=0.6865, over 5037.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7204, over 5781.24 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,703 INFO [trainer.py:765] (1/8) Epoch 38, batch 900, train_loss[loss=2.848, NarTop10Accuracy=0.7505, over 6318.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7212, over 5803.13 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,091 INFO [trainer.py:765] (1/8) Epoch 38, batch 1000, train_loss[loss=3.537, NarTop10Accuracy=0.6205, over 6603.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7207, over 5891.11 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:57:08,990 INFO [trainer.py:765] (1/8) Epoch 38, batch 1100, train_loss[loss=3.044, NarTop10Accuracy=0.7128, over 6888.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7185, over 5919.93 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,661 INFO [trainer.py:765] (1/8) Epoch 38, batch 1200, train_loss[loss=2.873, NarTop10Accuracy=0.7594, over 7200.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7181, over 5921.70 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,546 INFO [trainer.py:765] (1/8) Epoch 38, batch 1300, train_loss[loss=3.19, NarTop10Accuracy=0.6812, over 5067.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5984.97 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (1/8) Epoch 38, batch 1400, train_loss[loss=2.888, NarTop10Accuracy=0.7442, over 6192.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7133, over 6018.42 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,853 INFO [trainer.py:765] (1/8) Epoch 38, batch 1500, train_loss[loss=3.554, NarTop10Accuracy=0.6097, over 6144.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7176, over 5961.66 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 22:59:50,643 INFO [trainer.py:765] (1/8) Epoch 38, batch 1600, train_loss[loss=3.32, NarTop10Accuracy=0.6646, over 7038.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7183, over 5950.61 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,314 INFO [trainer.py:765] (1/8) Epoch 38, batch 1700, train_loss[loss=2.884, NarTop10Accuracy=0.7471, over 6741.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7158, over 5930.01 frames. ], batch size: 14, lr: 2.23e-03 +2024-08-06 23:00:43,763 INFO [trainer.py:765] (1/8) Epoch 38, batch 1800, train_loss[loss=3.241, NarTop10Accuracy=0.6727, over 7029.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7155, over 5989.18 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,191 INFO [trainer.py:765] (1/8) Epoch 38, batch 1900, train_loss[loss=3.445, NarTop10Accuracy=0.6377, over 6180.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 6026.57 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (1/8) Epoch 38, batch 2000, train_loss[loss=3.37, NarTop10Accuracy=0.6469, over 6147.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7139, over 6003.63 frames. ], batch size: 54, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (1/8) Epoch 38, batch 2100, train_loss[loss=2.992, NarTop10Accuracy=0.7168, over 3909.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5987.03 frames. ], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,313 INFO [trainer.py:765] (1/8) Epoch 38, batch 2200, train_loss[loss=2.838, NarTop10Accuracy=0.7588, over 7224.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 6017.54 frames. 
], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,419 INFO [trainer.py:765] (1/8) Epoch 38, batch 2300, train_loss[loss=2.887, NarTop10Accuracy=0.7491, over 5673.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7162, over 6036.88 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,347 INFO [trainer.py:765] (1/8) Epoch 38, batch 2400, train_loss[loss=2.656, NarTop10Accuracy=0.8005, over 5196.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7184, over 5785.54 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,823 INFO [trainer.py:765] (1/8) Epoch 38, batch 2500, train_loss[loss=3.318, NarTop10Accuracy=0.6563, over 5670.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 5494.44 frames. ], batch size: 8, lr: 2.22e-03 +2024-08-06 23:03:59,763 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 23:04:58,940 INFO [trainer.py:765] (1/8) Epoch 39, batch 100, train_loss[loss=3.229, NarTop10Accuracy=0.6741, over 7188.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.7263, over 2381.99 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,467 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (1/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (1/8) Epoch 39, batch 200, train_loss[loss=2.737, NarTop10Accuracy=0.7811, over 6915.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7256, over 3866.31 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (1/8) Epoch 39, batch 300, train_loss[loss=3.059, NarTop10Accuracy=0.7202, over 7155.00 frames. ], tot_loss[loss=2.988, NarTop10Accuracy=0.7281, over 4638.63 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,275 INFO [trainer.py:765] (1/8) Epoch 39, batch 400, train_loss[loss=2.889, NarTop10Accuracy=0.7537, over 5250.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7274, over 5080.26 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (1/8) Epoch 39, batch 500, train_loss[loss=3.215, NarTop10Accuracy=0.6754, over 6093.00 frames. ], tot_loss[loss=2.995, NarTop10Accuracy=0.7262, over 5368.60 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (1/8) Epoch 39, batch 600, train_loss[loss=2.841, NarTop10Accuracy=0.7618, over 5745.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7225, over 5637.39 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,695 INFO [trainer.py:765] (1/8) Epoch 39, batch 700, train_loss[loss=3.121, NarTop10Accuracy=0.7037, over 4203.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7209, over 5705.41 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (1/8) Epoch 39, batch 800, train_loss[loss=2.754, NarTop10Accuracy=0.7759, over 4257.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7202, over 5765.94 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (1/8) Epoch 39, batch 900, train_loss[loss=3.321, NarTop10Accuracy=0.6568, over 6648.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7204, over 5784.69 frames. 
], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (1/8) Epoch 39, batch 1000, train_loss[loss=2.847, NarTop10Accuracy=0.7511, over 6411.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7223, over 5884.64 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (1/8) Epoch 39, batch 1100, train_loss[loss=2.858, NarTop10Accuracy=0.7592, over 6951.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 5919.87 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (1/8) Epoch 39, batch 1200, train_loss[loss=2.93, NarTop10Accuracy=0.7359, over 7560.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 5918.66 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,253 INFO [trainer.py:765] (1/8) Epoch 39, batch 1300, train_loss[loss=2.749, NarTop10Accuracy=0.7686, over 5172.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7218, over 5998.24 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,302 INFO [trainer.py:765] (1/8) Epoch 39, batch 1400, train_loss[loss=3.042, NarTop10Accuracy=0.718, over 6075.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7215, over 6024.36 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (1/8) Epoch 39, batch 1500, train_loss[loss=3.552, NarTop10Accuracy=0.6169, over 6252.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7216, over 5954.91 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (1/8) Epoch 39, batch 1600, train_loss[loss=2.873, NarTop10Accuracy=0.7518, over 7278.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7248, over 5938.00 frames. ], batch size: 23, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (1/8) Epoch 39, batch 1700, train_loss[loss=3.448, NarTop10Accuracy=0.6306, over 6198.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7182, over 5914.15 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 23:14:30,767 INFO [trainer.py:765] (1/8) Epoch 39, batch 1800, train_loss[loss=2.828, NarTop10Accuracy=0.7639, over 7185.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5975.55 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (1/8) Epoch 39, batch 1900, train_loss[loss=2.951, NarTop10Accuracy=0.7409, over 5865.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7161, over 6008.73 frames. ], batch size: 53, lr: 2.17e-03 +2024-08-06 23:15:22,751 INFO [trainer.py:765] (1/8) Epoch 39, batch 2000, train_loss[loss=3.354, NarTop10Accuracy=0.6579, over 5220.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7205, over 5988.24 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (1/8) Epoch 39, batch 2100, train_loss[loss=3.191, NarTop10Accuracy=0.6839, over 3969.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5972.63 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (1/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (1/8) Epoch 39, batch 2200, train_loss[loss=3.224, NarTop10Accuracy=0.6782, over 7119.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7206, over 6009.57 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (1/8) Epoch 39, batch 2300, train_loss[loss=2.669, NarTop10Accuracy=0.7945, over 5700.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7188, over 6017.63 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (1/8) Epoch 39, batch 2400, train_loss[loss=2.836, NarTop10Accuracy=0.7596, over 5178.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7232, over 5763.36 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (1/8) Epoch 39, batch 2500, train_loss[loss=3.115, NarTop10Accuracy=0.7056, over 5016.00 frames. ], tot_loss[loss=2.989, NarTop10Accuracy=0.727, over 5484.33 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,631 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (1/8) Epoch 40, batch 100, train_loss[loss=3.065, NarTop10Accuracy=0.7184, over 7212.00 frames. ], tot_loss[loss=2.998, NarTop10Accuracy=0.7262, over 2350.66 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (1/8) Epoch 40, batch 200, train_loss[loss=2.751, NarTop10Accuracy=0.7752, over 6765.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7268, over 3838.66 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,188 INFO [trainer.py:765] (1/8) Epoch 40, batch 300, train_loss[loss=2.772, NarTop10Accuracy=0.7693, over 6993.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7225, over 4663.44 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (1/8) Epoch 40, batch 400, train_loss[loss=2.814, NarTop10Accuracy=0.766, over 5238.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7227, over 5106.63 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,251 INFO [trainer.py:765] (1/8) Epoch 40, batch 500, train_loss[loss=2.749, NarTop10Accuracy=0.7735, over 6126.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5377.65 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,882 INFO [trainer.py:765] (1/8) Epoch 40, batch 600, train_loss[loss=2.977, NarTop10Accuracy=0.7332, over 5751.00 frames. ], tot_loss[loss=2.996, NarTop10Accuracy=0.7261, over 5639.65 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (1/8) Epoch 40, batch 700, train_loss[loss=2.851, NarTop10Accuracy=0.7473, over 5010.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.7239, over 5722.59 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,754 INFO [trainer.py:765] (1/8) Epoch 40, batch 800, train_loss[loss=2.669, NarTop10Accuracy=0.794, over 5025.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.722, over 5779.67 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,636 INFO [trainer.py:765] (1/8) Epoch 40, batch 900, train_loss[loss=3.358, NarTop10Accuracy=0.6545, over 6606.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7214, over 5805.59 frames. 
], batch size: 14, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (1/8) Epoch 40, batch 1000, train_loss[loss=3.432, NarTop10Accuracy=0.6425, over 6705.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7192, over 5906.34 frames. ], batch size: 14, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (1/8) Epoch 40, batch 1100, train_loss[loss=2.733, NarTop10Accuracy=0.7781, over 6999.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5934.80 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (1/8) Epoch 40, batch 1200, train_loss[loss=2.938, NarTop10Accuracy=0.7295, over 7176.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7217, over 5945.80 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,843 INFO [trainer.py:765] (1/8) Epoch 40, batch 1300, train_loss[loss=2.829, NarTop10Accuracy=0.7619, over 5163.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7234, over 6006.28 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,385 INFO [trainer.py:765] (1/8) Epoch 40, batch 1400, train_loss[loss=2.863, NarTop10Accuracy=0.7556, over 5994.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.721, over 6025.68 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (1/8) Epoch 40, batch 1500, train_loss[loss=3.386, NarTop10Accuracy=0.644, over 5655.00 frames. ], tot_loss[loss=3.005, NarTop10Accuracy=0.7244, over 5967.31 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (1/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30116MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,381 INFO [trainer.py:765] (1/8) Epoch 40, batch 1600, train_loss[loss=3.044, NarTop10Accuracy=0.7196, over 7164.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7227, over 5936.13 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (1/8) Epoch 40, batch 1700, train_loss[loss=3.362, NarTop10Accuracy=0.6507, over 6606.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7227, over 5913.10 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 23:28:12,578 INFO [trainer.py:765] (1/8) Epoch 40, batch 1800, train_loss[loss=2.914, NarTop10Accuracy=0.7408, over 7299.00 frames. ], tot_loss[loss=2.995, NarTop10Accuracy=0.7264, over 5986.41 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,908 INFO [trainer.py:765] (1/8) Epoch 40, batch 1900, train_loss[loss=3.249, NarTop10Accuracy=0.6772, over 6153.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.7254, over 6031.59 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,444 INFO [trainer.py:765] (1/8) Epoch 40, batch 2000, train_loss[loss=3.534, NarTop10Accuracy=0.6074, over 6039.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7243, over 5988.20 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,749 INFO [trainer.py:765] (1/8) Epoch 40, batch 2100, train_loss[loss=2.974, NarTop10Accuracy=0.7417, over 4005.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7236, over 5973.04 frames. 
], batch size: 4, lr: 2.11e-03 +2024-08-06 23:29:54,938 INFO [trainer.py:765] (1/8) Epoch 40, batch 2200, train_loss[loss=3.165, NarTop10Accuracy=0.6909, over 7509.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7206, over 6004.97 frames. ], batch size: 32, lr: 2.11e-03 +2024-08-06 23:30:20,012 INFO [trainer.py:765] (1/8) Epoch 40, batch 2300, train_loss[loss=2.933, NarTop10Accuracy=0.7422, over 5757.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7188, over 6018.47 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,295 INFO [trainer.py:765] (1/8) Epoch 40, batch 2400, train_loss[loss=2.925, NarTop10Accuracy=0.7431, over 5070.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7207, over 5770.23 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (1/8) Epoch 40, batch 2500, train_loss[loss=2.992, NarTop10Accuracy=0.7198, over 5283.00 frames. ], tot_loss[loss=2.989, NarTop10Accuracy=0.7272, over 5470.32 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,218 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 23:31:27,221 INFO [trainer.py:1069] (1/8) Done! diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-2 b/libritts-r/log/log-train-2024-08-06-14-23-41-2 new file mode 100644 index 0000000000000000000000000000000000000000..665aa9edd812d3ebb4f3af8f8d5d38e93237c823 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-2 @@ -0,0 +1,1260 @@ +2024-08-06 14:23:41,727 INFO [trainer.py:870] (2/8) Training started +2024-08-06 14:23:41,728 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 14:23:41,728 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,728 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 14:23:42,446 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,446 INFO [checkpoint.py:112] (2/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,398 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 14:23:49,639 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 14:23:49,641 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 14:23:49,642 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 14:23:49,643 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 14:23:49,643 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,257 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 14:23:50,257 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 14:23:50,585 INFO [datamodule.py:388] (2/8) About to create dev dataloader +2024-08-06 14:24:38,249 INFO [trainer.py:765] (2/8) Epoch 1, batch 100, train_loss[loss=105.9, NarTop10Accuracy=0.02382, over 7296.00 frames. ], tot_loss[loss=73.84, NarTop10Accuracy=0.04677, over 2381.79 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (2/8) Epoch 1, batch 200, train_loss[loss=129.7, NarTop10Accuracy=0.01297, over 6786.00 frames. ], tot_loss[loss=97.4, NarTop10Accuracy=0.04128, over 3860.02 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,111 INFO [trainer.py:765] (2/8) Epoch 1, batch 300, train_loss[loss=113.5, NarTop10Accuracy=0.02626, over 7110.00 frames. ], tot_loss[loss=85.22, NarTop10Accuracy=0.04226, over 4663.73 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,483 INFO [trainer.py:765] (2/8) Epoch 1, batch 400, train_loss[loss=48.85, NarTop10Accuracy=0.02065, over 5205.00 frames. ], tot_loss[loss=67.92, NarTop10Accuracy=0.04664, over 5094.86 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,358 INFO [trainer.py:765] (2/8) Epoch 1, batch 500, train_loss[loss=14.61, NarTop10Accuracy=0.02276, over 6099.00 frames. ], tot_loss[loss=49.19, NarTop10Accuracy=0.04923, over 5350.99 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,001 INFO [trainer.py:765] (2/8) Epoch 1, batch 600, train_loss[loss=6.125, NarTop10Accuracy=0.201, over 5574.00 frames. ], tot_loss[loss=33.5, NarTop10Accuracy=0.05448, over 5625.48 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (2/8) Epoch 1, batch 700, train_loss[loss=6.785, NarTop10Accuracy=0.1159, over 5076.00 frames. ], tot_loss[loss=23.45, NarTop10Accuracy=0.0634, over 5696.43 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 14:28:08,831 INFO [trainer.py:765] (2/8) Epoch 1, batch 800, train_loss[loss=6.402, NarTop10Accuracy=0.132, over 4344.00 frames. ], tot_loss[loss=17.17, NarTop10Accuracy=0.0844, over 5771.42 frames. ], batch size: 5, lr: 2.98e-02 +2024-08-06 14:28:36,757 INFO [trainer.py:765] (2/8) Epoch 1, batch 900, train_loss[loss=5.68, NarTop10Accuracy=0.1962, over 6396.00 frames. ], tot_loss[loss=12.8, NarTop10Accuracy=0.1132, over 5780.55 frames. 
], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,587 INFO [trainer.py:765] (2/8) Epoch 1, batch 1000, train_loss[loss=5.633, NarTop10Accuracy=0.2054, over 6135.00 frames. ], tot_loss[loss=10.11, NarTop10Accuracy=0.1352, over 5886.64 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 14:29:42,825 INFO [trainer.py:765] (2/8) Epoch 1, batch 1100, train_loss[loss=5.767, NarTop10Accuracy=0.1733, over 7020.00 frames. ], tot_loss[loss=8.415, NarTop10Accuracy=0.1539, over 5922.01 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,468 INFO [trainer.py:765] (2/8) Epoch 1, batch 1200, train_loss[loss=5.946, NarTop10Accuracy=0.168, over 7062.00 frames. ], tot_loss[loss=7.35, NarTop10Accuracy=0.1716, over 5913.77 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,748 INFO [trainer.py:765] (2/8) Epoch 1, batch 1300, train_loss[loss=5.254, NarTop10Accuracy=0.2807, over 5016.00 frames. ], tot_loss[loss=6.684, NarTop10Accuracy=0.1861, over 5981.32 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 14:31:18,144 INFO [trainer.py:765] (2/8) Epoch 1, batch 1400, train_loss[loss=5.694, NarTop10Accuracy=0.1826, over 6111.00 frames. ], tot_loss[loss=6.256, NarTop10Accuracy=0.1961, over 5997.68 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,027 INFO [trainer.py:765] (2/8) Epoch 1, batch 1500, train_loss[loss=5.798, NarTop10Accuracy=0.1867, over 6036.00 frames. ], tot_loss[loss=5.968, NarTop10Accuracy=0.2095, over 5966.11 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 14:32:13,692 INFO [trainer.py:765] (2/8) Epoch 1, batch 1600, train_loss[loss=5.579, NarTop10Accuracy=0.2149, over 7053.00 frames. ], tot_loss[loss=5.784, NarTop10Accuracy=0.219, over 5944.76 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,199 INFO [trainer.py:765] (2/8) Epoch 1, batch 1700, train_loss[loss=5.472, NarTop10Accuracy=0.2443, over 6723.00 frames. ], tot_loss[loss=5.664, NarTop10Accuracy=0.2258, over 5918.97 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 14:33:06,500 INFO [trainer.py:765] (2/8) Epoch 1, batch 1800, train_loss[loss=5.539, NarTop10Accuracy=0.2244, over 7185.00 frames. ], tot_loss[loss=5.576, NarTop10Accuracy=0.2327, over 5974.83 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (2/8) Epoch 1, batch 1900, train_loss[loss=5.709, NarTop10Accuracy=0.1936, over 6222.00 frames. ], tot_loss[loss=5.503, NarTop10Accuracy=0.2419, over 6025.19 frames. ], batch size: 55, lr: 2.90e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:765] (2/8) Epoch 1, batch 2000, train_loss[loss=5.51, NarTop10Accuracy=0.2418, over 6603.00 frames. ], tot_loss[loss=5.442, NarTop10Accuracy=0.2507, over 6000.52 frames. ], batch size: 52, lr: 2.89e-02 +2024-08-06 14:33:58,016 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (2/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 26698MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,062 INFO [trainer.py:765] (2/8) Epoch 1, batch 2100, train_loss[loss=5.372, NarTop10Accuracy=0.2644, over 4869.00 frames. ], tot_loss[loss=5.383, NarTop10Accuracy=0.2605, over 5977.59 frames. 
], batch size: 5, lr: 2.88e-02 +2024-08-06 14:34:57,304 INFO [trainer.py:765] (2/8) Epoch 1, batch 2200, train_loss[loss=5.348, NarTop10Accuracy=0.268, over 7281.00 frames. ], tot_loss[loss=5.344, NarTop10Accuracy=0.2661, over 6005.99 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 14:35:22,456 INFO [trainer.py:765] (2/8) Epoch 1, batch 2300, train_loss[loss=5.355, NarTop10Accuracy=0.2604, over 5811.00 frames. ], tot_loss[loss=5.333, NarTop10Accuracy=0.2676, over 6023.17 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,816 INFO [trainer.py:765] (2/8) Epoch 1, batch 2400, train_loss[loss=5.161, NarTop10Accuracy=0.2962, over 5100.00 frames. ], tot_loss[loss=5.286, NarTop10Accuracy=0.2763, over 5772.42 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (2/8) Epoch 1, batch 2500, train_loss[loss=4.773, NarTop10Accuracy=0.3783, over 5310.00 frames. ], tot_loss[loss=5.22, NarTop10Accuracy=0.2884, over 5465.53 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,180 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 14:37:29,668 INFO [trainer.py:765] (2/8) Epoch 2, batch 100, train_loss[loss=5.064, NarTop10Accuracy=0.3203, over 7248.00 frames. ], tot_loss[loss=5.183, NarTop10Accuracy=0.2967, over 2367.36 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,015 INFO [trainer.py:765] (2/8) Epoch 2, batch 200, train_loss[loss=5.106, NarTop10Accuracy=0.3178, over 6894.00 frames. ], tot_loss[loss=5.148, NarTop10Accuracy=0.3024, over 3843.92 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (2/8) Epoch 2, batch 300, train_loss[loss=5.21, NarTop10Accuracy=0.2845, over 7362.00 frames. ], tot_loss[loss=5.139, NarTop10Accuracy=0.3032, over 4662.87 frames. ], batch size: 23, lr: 2.75e-02 +2024-08-06 14:39:06,998 INFO [trainer.py:765] (2/8) Epoch 2, batch 400, train_loss[loss=4.933, NarTop10Accuracy=0.3372, over 5157.00 frames. ], tot_loss[loss=5.112, NarTop10Accuracy=0.3081, over 5101.29 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,119 INFO [trainer.py:765] (2/8) Epoch 2, batch 500, train_loss[loss=4.893, NarTop10Accuracy=0.3482, over 6192.00 frames. ], tot_loss[loss=5.069, NarTop10Accuracy=0.3163, over 5377.24 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,083 INFO [trainer.py:765] (2/8) Epoch 2, batch 600, train_loss[loss=4.94, NarTop10Accuracy=0.3521, over 5772.00 frames. ], tot_loss[loss=5.044, NarTop10Accuracy=0.321, over 5648.45 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (2/8) Epoch 2, batch 700, train_loss[loss=4.832, NarTop10Accuracy=0.3576, over 5094.00 frames. ], tot_loss[loss=5.024, NarTop10Accuracy=0.3245, over 5727.07 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,513 INFO [trainer.py:765] (2/8) Epoch 2, batch 800, train_loss[loss=5.213, NarTop10Accuracy=0.2888, over 4248.00 frames. ], tot_loss[loss=5.014, NarTop10Accuracy=0.3262, over 5775.62 frames. ], batch size: 5, lr: 2.69e-02 +2024-08-06 14:41:54,404 INFO [trainer.py:765] (2/8) Epoch 2, batch 900, train_loss[loss=4.723, NarTop10Accuracy=0.3814, over 6540.00 frames. ], tot_loss[loss=4.982, NarTop10Accuracy=0.3324, over 5802.34 frames. ], batch size: 14, lr: 2.68e-02 +2024-08-06 14:42:23,901 INFO [trainer.py:765] (2/8) Epoch 2, batch 1000, train_loss[loss=4.783, NarTop10Accuracy=0.3749, over 6732.00 frames. ], tot_loss[loss=4.952, NarTop10Accuracy=0.3379, over 5898.02 frames. 
], batch size: 14, lr: 2.66e-02 +2024-08-06 14:42:56,254 INFO [trainer.py:765] (2/8) Epoch 2, batch 1100, train_loss[loss=4.935, NarTop10Accuracy=0.3404, over 6933.00 frames. ], tot_loss[loss=4.932, NarTop10Accuracy=0.3415, over 5939.91 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,188 INFO [trainer.py:765] (2/8) Epoch 2, batch 1200, train_loss[loss=4.861, NarTop10Accuracy=0.3565, over 7323.00 frames. ], tot_loss[loss=4.909, NarTop10Accuracy=0.3462, over 5942.34 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,347 INFO [trainer.py:765] (2/8) Epoch 2, batch 1300, train_loss[loss=4.952, NarTop10Accuracy=0.3437, over 5115.00 frames. ], tot_loss[loss=4.871, NarTop10Accuracy=0.353, over 5993.17 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,728 INFO [trainer.py:765] (2/8) Epoch 2, batch 1400, train_loss[loss=5.119, NarTop10Accuracy=0.3063, over 6135.00 frames. ], tot_loss[loss=4.854, NarTop10Accuracy=0.3562, over 6013.65 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,442 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (2/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27019MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (2/8) Epoch 2, batch 1500, train_loss[loss=4.715, NarTop10Accuracy=0.3803, over 6348.00 frames. ], tot_loss[loss=4.826, NarTop10Accuracy=0.3618, over 5953.46 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,659 INFO [trainer.py:765] (2/8) Epoch 2, batch 1600, train_loss[loss=4.74, NarTop10Accuracy=0.3797, over 6996.00 frames. ], tot_loss[loss=4.799, NarTop10Accuracy=0.3668, over 5926.73 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (2/8) Epoch 2, batch 1700, train_loss[loss=4.924, NarTop10Accuracy=0.3345, over 6618.00 frames. ], tot_loss[loss=4.795, NarTop10Accuracy=0.3673, over 5916.99 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (2/8) Epoch 2, batch 1800, train_loss[loss=4.717, NarTop10Accuracy=0.3814, over 7230.00 frames. ], tot_loss[loss=4.777, NarTop10Accuracy=0.3708, over 5990.76 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (2/8) Epoch 2, batch 1900, train_loss[loss=4.814, NarTop10Accuracy=0.3632, over 6186.00 frames. ], tot_loss[loss=4.759, NarTop10Accuracy=0.3744, over 6009.76 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 14:47:23,234 INFO [trainer.py:765] (2/8) Epoch 2, batch 2000, train_loss[loss=4.783, NarTop10Accuracy=0.3749, over 6000.00 frames. ], tot_loss[loss=4.731, NarTop10Accuracy=0.3795, over 5984.04 frames. ], batch size: 51, lr: 2.54e-02 +2024-08-06 14:47:48,589 INFO [trainer.py:765] (2/8) Epoch 2, batch 2100, train_loss[loss=4.766, NarTop10Accuracy=0.3765, over 4830.00 frames. ], tot_loss[loss=4.719, NarTop10Accuracy=0.3816, over 5974.86 frames. ], batch size: 5, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (2/8) Epoch 2, batch 2200, train_loss[loss=4.762, NarTop10Accuracy=0.3754, over 7140.00 frames. ], tot_loss[loss=4.685, NarTop10Accuracy=0.3882, over 6017.07 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (2/8) Epoch 2, batch 2300, train_loss[loss=4.663, NarTop10Accuracy=0.3922, over 5703.00 frames. ], tot_loss[loss=4.687, NarTop10Accuracy=0.388, over 6021.88 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,320 INFO [trainer.py:765] (2/8) Epoch 2, batch 2400, train_loss[loss=4.572, NarTop10Accuracy=0.4112, over 5121.00 frames. ], tot_loss[loss=4.651, NarTop10Accuracy=0.395, over 5760.93 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (2/8) Epoch 2, batch 2500, train_loss[loss=4.654, NarTop10Accuracy=0.3908, over 5244.00 frames. ], tot_loss[loss=4.622, NarTop10Accuracy=0.4007, over 5470.90 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,767 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 14:50:51,117 INFO [trainer.py:765] (2/8) Epoch 3, batch 100, train_loss[loss=4.668, NarTop10Accuracy=0.3934, over 7326.00 frames. ], tot_loss[loss=4.576, NarTop10Accuracy=0.4099, over 2360.61 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,388 INFO [trainer.py:765] (2/8) Epoch 3, batch 200, train_loss[loss=4.653, NarTop10Accuracy=0.4051, over 6726.00 frames. ], tot_loss[loss=4.545, NarTop10Accuracy=0.416, over 3837.44 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,954 INFO [trainer.py:765] (2/8) Epoch 3, batch 300, train_loss[loss=4.691, NarTop10Accuracy=0.3874, over 7452.00 frames. ], tot_loss[loss=4.52, NarTop10Accuracy=0.4209, over 4638.88 frames. ], batch size: 24, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (2/8) Epoch 3, batch 400, train_loss[loss=4.491, NarTop10Accuracy=0.4303, over 5124.00 frames. ], tot_loss[loss=4.495, NarTop10Accuracy=0.4258, over 5093.83 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (2/8) Epoch 3, batch 500, train_loss[loss=4.336, NarTop10Accuracy=0.4529, over 6084.00 frames. ], tot_loss[loss=4.483, NarTop10Accuracy=0.4275, over 5400.75 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,552 INFO [trainer.py:765] (2/8) Epoch 3, batch 600, train_loss[loss=4.397, NarTop10Accuracy=0.445, over 5643.00 frames. ], tot_loss[loss=4.468, NarTop10Accuracy=0.4306, over 5663.01 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,466 INFO [trainer.py:765] (2/8) Epoch 3, batch 700, train_loss[loss=4.317, NarTop10Accuracy=0.4677, over 4362.00 frames. ], tot_loss[loss=4.446, NarTop10Accuracy=0.4351, over 5733.28 frames. ], batch size: 5, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (2/8) Epoch 3, batch 800, train_loss[loss=3.943, NarTop10Accuracy=0.5424, over 5190.00 frames. ], tot_loss[loss=4.42, NarTop10Accuracy=0.4404, over 5784.32 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (2/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27019MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,052 INFO [trainer.py:765] (2/8) Epoch 3, batch 900, train_loss[loss=4.042, NarTop10Accuracy=0.5151, over 6729.00 frames. ], tot_loss[loss=4.386, NarTop10Accuracy=0.4473, over 5824.38 frames. 
], batch size: 14, lr: 2.26e-02 +2024-08-06 14:56:04,958 INFO [trainer.py:765] (2/8) Epoch 3, batch 1000, train_loss[loss=4.403, NarTop10Accuracy=0.4427, over 6513.00 frames. ], tot_loss[loss=4.373, NarTop10Accuracy=0.4496, over 5912.61 frames. ], batch size: 14, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (2/8) Epoch 3, batch 1100, train_loss[loss=4.483, NarTop10Accuracy=0.4238, over 6849.00 frames. ], tot_loss[loss=4.349, NarTop10Accuracy=0.4546, over 5930.93 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (2/8) Epoch 3, batch 1200, train_loss[loss=4.36, NarTop10Accuracy=0.4487, over 7464.00 frames. ], tot_loss[loss=4.332, NarTop10Accuracy=0.4576, over 5927.51 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,630 INFO [trainer.py:765] (2/8) Epoch 3, batch 1300, train_loss[loss=4.333, NarTop10Accuracy=0.4592, over 5100.00 frames. ], tot_loss[loss=4.308, NarTop10Accuracy=0.4625, over 5997.09 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 14:58:22,900 INFO [trainer.py:765] (2/8) Epoch 3, batch 1400, train_loss[loss=4.303, NarTop10Accuracy=0.4694, over 6237.00 frames. ], tot_loss[loss=4.298, NarTop10Accuracy=0.4642, over 6004.07 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,855 INFO [trainer.py:765] (2/8) Epoch 3, batch 1500, train_loss[loss=4.374, NarTop10Accuracy=0.4456, over 6144.00 frames. ], tot_loss[loss=4.273, NarTop10Accuracy=0.469, over 5942.15 frames. ], batch size: 51, lr: 2.20e-02 +2024-08-06 14:59:18,715 INFO [trainer.py:765] (2/8) Epoch 3, batch 1600, train_loss[loss=4.065, NarTop10Accuracy=0.5123, over 7098.00 frames. ], tot_loss[loss=4.251, NarTop10Accuracy=0.4734, over 5900.94 frames. ], batch size: 23, lr: 2.19e-02 +2024-08-06 14:59:45,952 INFO [trainer.py:765] (2/8) Epoch 3, batch 1700, train_loss[loss=4.079, NarTop10Accuracy=0.51, over 6651.00 frames. ], tot_loss[loss=4.228, NarTop10Accuracy=0.4779, over 5887.70 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (2/8) Epoch 3, batch 1800, train_loss[loss=4.021, NarTop10Accuracy=0.5186, over 7029.00 frames. ], tot_loss[loss=4.207, NarTop10Accuracy=0.4816, over 5959.05 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,948 INFO [trainer.py:765] (2/8) Epoch 3, batch 1900, train_loss[loss=4.671, NarTop10Accuracy=0.386, over 6285.00 frames. ], tot_loss[loss=4.187, NarTop10Accuracy=0.486, over 6018.85 frames. ], batch size: 50, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (2/8) Epoch 3, batch 2000, train_loss[loss=4.419, NarTop10Accuracy=0.4369, over 6279.00 frames. ], tot_loss[loss=4.163, NarTop10Accuracy=0.4908, over 5984.78 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,898 INFO [trainer.py:765] (2/8) Epoch 3, batch 2100, train_loss[loss=3.764, NarTop10Accuracy=0.5761, over 4002.00 frames. ], tot_loss[loss=4.14, NarTop10Accuracy=0.4956, over 5966.17 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (2/8) Epoch 3, batch 2200, train_loss[loss=3.964, NarTop10Accuracy=0.5353, over 7188.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.501, over 6002.13 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (2/8) Epoch 3, batch 2300, train_loss[loss=4.362, NarTop10Accuracy=0.4477, over 5820.00 frames. ], tot_loss[loss=4.131, NarTop10Accuracy=0.4974, over 6020.95 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,664 INFO [trainer.py:765] (2/8) Epoch 3, batch 2400, train_loss[loss=4.1, NarTop10Accuracy=0.4958, over 5139.00 frames. ], tot_loss[loss=4.097, NarTop10Accuracy=0.5047, over 5779.94 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (2/8) Epoch 3, batch 2500, train_loss[loss=3.793, NarTop10Accuracy=0.5601, over 5133.00 frames. ], tot_loss[loss=4.042, NarTop10Accuracy=0.5157, over 5482.18 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,326 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:04:28,131 INFO [trainer.py:765] (2/8) Epoch 4, batch 100, train_loss[loss=3.906, NarTop10Accuracy=0.5521, over 7287.00 frames. ], tot_loss[loss=4.04, NarTop10Accuracy=0.5175, over 2347.95 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,842 INFO [trainer.py:765] (2/8) Epoch 4, batch 200, train_loss[loss=3.791, NarTop10Accuracy=0.5727, over 6831.00 frames. ], tot_loss[loss=4.02, NarTop10Accuracy=0.5211, over 3844.73 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,508 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27023MB +2024-08-06 15:05:36,238 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,890 INFO [trainer.py:765] (2/8) Epoch 4, batch 300, train_loss[loss=3.804, NarTop10Accuracy=0.5716, over 7113.00 frames. ], tot_loss[loss=3.999, NarTop10Accuracy=0.5246, over 4650.35 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,124 INFO [trainer.py:765] (2/8) Epoch 4, batch 400, train_loss[loss=3.83, NarTop10Accuracy=0.5553, over 5184.00 frames. ], tot_loss[loss=4.007, NarTop10Accuracy=0.5232, over 5110.59 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (2/8) Epoch 4, batch 500, train_loss[loss=3.924, NarTop10Accuracy=0.5267, over 5964.00 frames. ], tot_loss[loss=3.984, NarTop10Accuracy=0.5279, over 5378.83 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,818 INFO [trainer.py:765] (2/8) Epoch 4, batch 600, train_loss[loss=3.773, NarTop10Accuracy=0.578, over 5769.00 frames. ], tot_loss[loss=3.974, NarTop10Accuracy=0.53, over 5641.89 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (2/8) Epoch 4, batch 700, train_loss[loss=4.092, NarTop10Accuracy=0.5078, over 5178.00 frames. ], tot_loss[loss=3.969, NarTop10Accuracy=0.5308, over 5715.03 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (2/8) Epoch 4, batch 800, train_loss[loss=3.838, NarTop10Accuracy=0.5613, over 5070.00 frames. ], tot_loss[loss=3.957, NarTop10Accuracy=0.5332, over 5789.14 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,689 INFO [trainer.py:765] (2/8) Epoch 4, batch 900, train_loss[loss=3.722, NarTop10Accuracy=0.5882, over 6165.00 frames. ], tot_loss[loss=3.923, NarTop10Accuracy=0.5399, over 5811.26 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,075 INFO [trainer.py:765] (2/8) Epoch 4, batch 1000, train_loss[loss=3.619, NarTop10Accuracy=0.6008, over 6150.00 frames. ], tot_loss[loss=3.914, NarTop10Accuracy=0.5417, over 5905.05 frames. 
], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,139 INFO [trainer.py:765] (2/8) Epoch 4, batch 1100, train_loss[loss=3.686, NarTop10Accuracy=0.5895, over 6672.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5432, over 5929.93 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (2/8) Epoch 4, batch 1200, train_loss[loss=4.237, NarTop10Accuracy=0.4721, over 7326.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5445, over 5941.01 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (2/8) Epoch 4, batch 1300, train_loss[loss=3.754, NarTop10Accuracy=0.5817, over 4986.00 frames. ], tot_loss[loss=3.86, NarTop10Accuracy=0.5528, over 6001.22 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (2/8) Epoch 4, batch 1400, train_loss[loss=3.615, NarTop10Accuracy=0.6008, over 6090.00 frames. ], tot_loss[loss=3.852, NarTop10Accuracy=0.5543, over 6005.11 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (2/8) Epoch 4, batch 1500, train_loss[loss=3.88, NarTop10Accuracy=0.5543, over 6534.00 frames. ], tot_loss[loss=3.854, NarTop10Accuracy=0.5538, over 5956.29 frames. ], batch size: 51, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (2/8) Epoch 4, batch 1600, train_loss[loss=3.772, NarTop10Accuracy=0.5715, over 6768.00 frames. ], tot_loss[loss=3.847, NarTop10Accuracy=0.5555, over 5918.16 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,133 INFO [trainer.py:765] (2/8) Epoch 4, batch 1700, train_loss[loss=3.904, NarTop10Accuracy=0.5408, over 6243.00 frames. ], tot_loss[loss=3.82, NarTop10Accuracy=0.5607, over 5919.66 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (2/8) Epoch 4, batch 1800, train_loss[loss=3.75, NarTop10Accuracy=0.5765, over 7095.00 frames. ], tot_loss[loss=3.822, NarTop10Accuracy=0.5608, over 5993.54 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (2/8) Epoch 4, batch 1900, train_loss[loss=3.765, NarTop10Accuracy=0.5707, over 5946.00 frames. ], tot_loss[loss=3.84, NarTop10Accuracy=0.5573, over 6040.09 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 15:14:46,672 INFO [trainer.py:765] (2/8) Epoch 4, batch 2000, train_loss[loss=3.784, NarTop10Accuracy=0.5663, over 5775.00 frames. ], tot_loss[loss=3.814, NarTop10Accuracy=0.5624, over 6017.69 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,859 INFO [trainer.py:765] (2/8) Epoch 4, batch 2100, train_loss[loss=3.603, NarTop10Accuracy=0.6047, over 4920.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5642, over 6005.44 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (2/8) Epoch 4, batch 2200, train_loss[loss=3.721, NarTop10Accuracy=0.5906, over 7011.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5658, over 6034.74 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,090 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27390MB +2024-08-06 15:16:03,740 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (2/8) Epoch 4, batch 2300, train_loss[loss=3.603, NarTop10Accuracy=0.6202, over 5751.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5641, over 6025.74 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (2/8) Epoch 4, batch 2400, train_loss[loss=3.347, NarTop10Accuracy=0.6635, over 5160.00 frames. ], tot_loss[loss=3.777, NarTop10Accuracy=0.5696, over 5787.17 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,534 INFO [trainer.py:765] (2/8) Epoch 4, batch 2500, train_loss[loss=3.476, NarTop10Accuracy=0.6374, over 5163.00 frames. ], tot_loss[loss=3.762, NarTop10Accuracy=0.5722, over 5482.39 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,339 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:18:24,101 INFO [trainer.py:765] (2/8) Epoch 5, batch 100, train_loss[loss=3.507, NarTop10Accuracy=0.6254, over 7317.00 frames. ], tot_loss[loss=3.778, NarTop10Accuracy=0.5688, over 2376.60 frames. ], batch size: 32, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (2/8) Epoch 5, batch 200, train_loss[loss=4.102, NarTop10Accuracy=0.5063, over 6744.00 frames. ], tot_loss[loss=3.76, NarTop10Accuracy=0.573, over 3855.98 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,888 INFO [trainer.py:765] (2/8) Epoch 5, batch 300, train_loss[loss=4.03, NarTop10Accuracy=0.52, over 7206.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5787, over 4656.37 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (2/8) Epoch 5, batch 400, train_loss[loss=3.585, NarTop10Accuracy=0.6116, over 5277.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5805, over 5102.01 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (2/8) Epoch 5, batch 500, train_loss[loss=3.923, NarTop10Accuracy=0.5344, over 6045.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5779, over 5390.27 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,711 INFO [trainer.py:765] (2/8) Epoch 5, batch 600, train_loss[loss=3.936, NarTop10Accuracy=0.5464, over 5673.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5807, over 5658.39 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (2/8) Epoch 5, batch 700, train_loss[loss=3.504, NarTop10Accuracy=0.6242, over 5208.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5818, over 5724.68 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,499 INFO [trainer.py:765] (2/8) Epoch 5, batch 800, train_loss[loss=3.954, NarTop10Accuracy=0.5239, over 5070.00 frames. ], tot_loss[loss=3.705, NarTop10Accuracy=0.584, over 5792.67 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (2/8) Epoch 5, batch 900, train_loss[loss=3.649, NarTop10Accuracy=0.5919, over 6279.00 frames. ], tot_loss[loss=3.693, NarTop10Accuracy=0.5863, over 5813.85 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (2/8) Epoch 5, batch 1000, train_loss[loss=3.596, NarTop10Accuracy=0.6069, over 6567.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5885, over 5911.91 frames. 
], batch size: 14, lr: 1.60e-02 +2024-08-06 15:24:09,572 INFO [trainer.py:765] (2/8) Epoch 5, batch 1100, train_loss[loss=3.45, NarTop10Accuracy=0.6364, over 6882.00 frames. ], tot_loss[loss=3.684, NarTop10Accuracy=0.5883, over 5940.84 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,529 INFO [trainer.py:765] (2/8) Epoch 5, batch 1200, train_loss[loss=3.522, NarTop10Accuracy=0.6223, over 7173.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5886, over 5931.78 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,380 INFO [trainer.py:765] (2/8) Epoch 5, batch 1300, train_loss[loss=3.93, NarTop10Accuracy=0.5343, over 5106.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5922, over 6005.58 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (2/8) Epoch 5, batch 1400, train_loss[loss=3.84, NarTop10Accuracy=0.5533, over 6096.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5908, over 6018.64 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,196 INFO [trainer.py:765] (2/8) Epoch 5, batch 1500, train_loss[loss=3.637, NarTop10Accuracy=0.6032, over 5979.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5924, over 5950.07 frames. ], batch size: 50, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (2/8) Epoch 5, batch 1600, train_loss[loss=3.451, NarTop10Accuracy=0.6374, over 7008.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5896, over 5934.19 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,604 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (2/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27390MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (2/8) Epoch 5, batch 1700, train_loss[loss=3.849, NarTop10Accuracy=0.5533, over 6747.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5923, over 5925.87 frames. ], batch size: 14, lr: 1.56e-02 +2024-08-06 15:27:55,652 INFO [trainer.py:765] (2/8) Epoch 5, batch 1800, train_loss[loss=3.884, NarTop10Accuracy=0.5403, over 7101.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5928, over 5983.42 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,171 INFO [trainer.py:765] (2/8) Epoch 5, batch 1900, train_loss[loss=3.747, NarTop10Accuracy=0.5814, over 5859.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5927, over 6030.47 frames. ], batch size: 51, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (2/8) Epoch 5, batch 2000, train_loss[loss=3.623, NarTop10Accuracy=0.603, over 6315.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5924, over 5990.18 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,769 INFO [trainer.py:765] (2/8) Epoch 5, batch 2100, train_loss[loss=3.383, NarTop10Accuracy=0.6379, over 3888.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5878, over 5970.52 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (2/8) Epoch 5, batch 2200, train_loss[loss=4.095, NarTop10Accuracy=0.5082, over 7203.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5921, over 6011.53 frames. 
], batch size: 32, lr: 1.54e-02 +2024-08-06 15:30:04,429 INFO [trainer.py:765] (2/8) Epoch 5, batch 2300, train_loss[loss=3.479, NarTop10Accuracy=0.6296, over 5667.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5901, over 6026.91 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (2/8) Epoch 5, batch 2400, train_loss[loss=3.327, NarTop10Accuracy=0.6434, over 5052.00 frames. ], tot_loss[loss=3.644, NarTop10Accuracy=0.5959, over 5785.53 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,502 INFO [trainer.py:765] (2/8) Epoch 5, batch 2500, train_loss[loss=3.324, NarTop10Accuracy=0.6611, over 5190.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.602, over 5491.34 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,342 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (2/8) Epoch 6, batch 100, train_loss[loss=3.551, NarTop10Accuracy=0.6109, over 7386.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.596, over 2383.31 frames. ], batch size: 31, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (2/8) Epoch 6, batch 200, train_loss[loss=3.879, NarTop10Accuracy=0.5421, over 6636.00 frames. ], tot_loss[loss=3.619, NarTop10Accuracy=0.6013, over 3878.68 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,242 INFO [trainer.py:765] (2/8) Epoch 6, batch 300, train_loss[loss=3.438, NarTop10Accuracy=0.6432, over 7230.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.602, over 4671.74 frames. ], batch size: 23, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (2/8) Epoch 6, batch 400, train_loss[loss=3.423, NarTop10Accuracy=0.6355, over 5103.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.6052, over 5112.41 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (2/8) Epoch 6, batch 500, train_loss[loss=3.321, NarTop10Accuracy=0.6619, over 6051.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6093, over 5388.35 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (2/8) Epoch 6, batch 600, train_loss[loss=3.279, NarTop10Accuracy=0.6755, over 5790.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6105, over 5647.15 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,734 INFO [trainer.py:765] (2/8) Epoch 6, batch 700, train_loss[loss=3.562, NarTop10Accuracy=0.6091, over 4254.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6095, over 5716.97 frames. ], batch size: 5, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (2/8) Epoch 6, batch 800, train_loss[loss=3.745, NarTop10Accuracy=0.5615, over 5001.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.6076, over 5768.63 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (2/8) Epoch 6, batch 900, train_loss[loss=3.984, NarTop10Accuracy=0.5259, over 6231.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6105, over 5790.46 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (2/8) Epoch 6, batch 1000, train_loss[loss=3.545, NarTop10Accuracy=0.6167, over 6549.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.6059, over 5901.73 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (2/8) Epoch 6, batch 1100, train_loss[loss=3.332, NarTop10Accuracy=0.6621, over 6951.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6067, over 5946.70 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,828 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (2/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27390MB +2024-08-06 15:38:04,966 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (2/8) Epoch 6, batch 1200, train_loss[loss=3.364, NarTop10Accuracy=0.6521, over 7218.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6096, over 5957.40 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (2/8) Epoch 6, batch 1300, train_loss[loss=3.367, NarTop10Accuracy=0.6573, over 5085.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6106, over 6013.27 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 15:39:44,070 INFO [trainer.py:765] (2/8) Epoch 6, batch 1400, train_loss[loss=3.354, NarTop10Accuracy=0.662, over 6126.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6112, over 6041.26 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (2/8) Epoch 6, batch 1500, train_loss[loss=3.972, NarTop10Accuracy=0.5319, over 5790.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6105, over 5976.62 frames. ], batch size: 52, lr: 1.36e-02 +2024-08-06 15:40:43,106 INFO [trainer.py:765] (2/8) Epoch 6, batch 1600, train_loss[loss=3.291, NarTop10Accuracy=0.6706, over 6993.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6124, over 5925.58 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,789 INFO [trainer.py:765] (2/8) Epoch 6, batch 1700, train_loss[loss=3.467, NarTop10Accuracy=0.6368, over 6321.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6142, over 5909.49 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (2/8) Epoch 6, batch 1800, train_loss[loss=3.24, NarTop10Accuracy=0.6849, over 6975.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6125, over 5995.00 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (2/8) Epoch 6, batch 1900, train_loss[loss=3.791, NarTop10Accuracy=0.5599, over 5976.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6092, over 6017.54 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (2/8) Epoch 6, batch 2000, train_loss[loss=3.591, NarTop10Accuracy=0.6123, over 6147.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6086, over 5986.07 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:53,669 INFO [trainer.py:765] (2/8) Epoch 6, batch 2100, train_loss[loss=3.335, NarTop10Accuracy=0.6677, over 4935.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.61, over 5988.71 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 15:43:18,978 INFO [trainer.py:765] (2/8) Epoch 6, batch 2200, train_loss[loss=3.826, NarTop10Accuracy=0.5588, over 7410.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6107, over 6021.80 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,105 INFO [trainer.py:765] (2/8) Epoch 6, batch 2300, train_loss[loss=3.416, NarTop10Accuracy=0.6425, over 5784.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6112, over 6016.38 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (2/8) Epoch 6, batch 2400, train_loss[loss=3.412, NarTop10Accuracy=0.6444, over 5118.00 frames. ], tot_loss[loss=3.545, NarTop10Accuracy=0.6163, over 5782.93 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (2/8) Epoch 6, batch 2500, train_loss[loss=3.5, NarTop10Accuracy=0.616, over 5727.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.621, over 5485.14 frames. ], batch size: 8, lr: 1.32e-02 +2024-08-06 15:44:51,803 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:45:58,043 INFO [trainer.py:765] (2/8) Epoch 7, batch 100, train_loss[loss=3.201, NarTop10Accuracy=0.6889, over 7209.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6164, over 2377.07 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (2/8) Epoch 7, batch 200, train_loss[loss=3.371, NarTop10Accuracy=0.6507, over 6720.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6188, over 3856.40 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,247 INFO [trainer.py:765] (2/8) Epoch 7, batch 300, train_loss[loss=3.685, NarTop10Accuracy=0.5859, over 7095.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6166, over 4663.41 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,495 INFO [trainer.py:765] (2/8) Epoch 7, batch 400, train_loss[loss=3.595, NarTop10Accuracy=0.6045, over 5073.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6192, over 5111.09 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,730 INFO [trainer.py:765] (2/8) Epoch 7, batch 500, train_loss[loss=3.574, NarTop10Accuracy=0.6112, over 6189.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.621, over 5389.93 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,369 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29439MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,719 INFO [trainer.py:765] (2/8) Epoch 7, batch 600, train_loss[loss=3.291, NarTop10Accuracy=0.6684, over 5583.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6202, over 5649.61 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,911 INFO [trainer.py:765] (2/8) Epoch 7, batch 700, train_loss[loss=3.847, NarTop10Accuracy=0.5536, over 5091.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6214, over 5729.10 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,380 INFO [trainer.py:765] (2/8) Epoch 7, batch 800, train_loss[loss=3.133, NarTop10Accuracy=0.6999, over 4341.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6247, over 5774.88 frames. ], batch size: 5, lr: 1.21e-02 +2024-08-06 15:50:34,547 INFO [trainer.py:765] (2/8) Epoch 7, batch 900, train_loss[loss=3.368, NarTop10Accuracy=0.6589, over 6291.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6267, over 5799.57 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,154 INFO [trainer.py:765] (2/8) Epoch 7, batch 1000, train_loss[loss=3.289, NarTop10Accuracy=0.6801, over 6672.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6279, over 5910.37 frames. 
], batch size: 14, lr: 1.20e-02 +2024-08-06 15:51:51,757 INFO [trainer.py:765] (2/8) Epoch 7, batch 1100, train_loss[loss=3.368, NarTop10Accuracy=0.6537, over 6801.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6262, over 5941.90 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,699 INFO [trainer.py:765] (2/8) Epoch 7, batch 1200, train_loss[loss=3.452, NarTop10Accuracy=0.6371, over 7476.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6279, over 5936.90 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,006 INFO [trainer.py:765] (2/8) Epoch 7, batch 1300, train_loss[loss=3.473, NarTop10Accuracy=0.6316, over 5223.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6277, over 5996.03 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,841 INFO [trainer.py:765] (2/8) Epoch 7, batch 1400, train_loss[loss=3.398, NarTop10Accuracy=0.6558, over 5988.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6256, over 6025.01 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,599 INFO [trainer.py:765] (2/8) Epoch 7, batch 1500, train_loss[loss=3.743, NarTop10Accuracy=0.5684, over 6297.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6303, over 5963.36 frames. ], batch size: 50, lr: 1.19e-02 +2024-08-06 15:54:32,384 INFO [trainer.py:765] (2/8) Epoch 7, batch 1600, train_loss[loss=3.704, NarTop10Accuracy=0.5812, over 7071.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6312, over 5937.90 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,054 INFO [trainer.py:765] (2/8) Epoch 7, batch 1700, train_loss[loss=3.787, NarTop10Accuracy=0.5661, over 6210.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6271, over 5914.91 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 15:55:25,511 INFO [trainer.py:765] (2/8) Epoch 7, batch 1800, train_loss[loss=3.855, NarTop10Accuracy=0.55, over 6987.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6277, over 5961.14 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,081 INFO [trainer.py:765] (2/8) Epoch 7, batch 1900, train_loss[loss=3.394, NarTop10Accuracy=0.6572, over 5796.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.624, over 6009.36 frames. ], batch size: 52, lr: 1.18e-02 +2024-08-06 15:56:17,590 INFO [trainer.py:765] (2/8) Epoch 7, batch 2000, train_loss[loss=3.698, NarTop10Accuracy=0.5891, over 5979.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6241, over 5982.85 frames. ], batch size: 51, lr: 1.17e-02 +2024-08-06 15:56:42,855 INFO [trainer.py:765] (2/8) Epoch 7, batch 2100, train_loss[loss=3.648, NarTop10Accuracy=0.5912, over 3870.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6276, over 5962.24 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,078 INFO [trainer.py:765] (2/8) Epoch 7, batch 2200, train_loss[loss=3.461, NarTop10Accuracy=0.632, over 7194.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6244, over 5993.34 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,177 INFO [trainer.py:765] (2/8) Epoch 7, batch 2300, train_loss[loss=3.245, NarTop10Accuracy=0.6782, over 5703.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6245, over 6010.10 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,618 INFO [trainer.py:765] (2/8) Epoch 7, batch 2400, train_loss[loss=3.212, NarTop10Accuracy=0.6778, over 5178.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.627, over 5791.00 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,087 INFO [trainer.py:765] (2/8) Epoch 7, batch 2500, train_loss[loss=3.8, NarTop10Accuracy=0.5652, over 5271.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6318, over 5486.17 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,564 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29439MB +2024-08-06 15:58:40,220 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:49,130 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:59:52,876 INFO [trainer.py:765] (2/8) Epoch 8, batch 100, train_loss[loss=3.65, NarTop10Accuracy=0.5927, over 7314.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6361, over 2357.64 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (2/8) Epoch 8, batch 200, train_loss[loss=3.318, NarTop10Accuracy=0.67, over 6930.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6318, over 3842.62 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,562 INFO [trainer.py:765] (2/8) Epoch 8, batch 300, train_loss[loss=3.236, NarTop10Accuracy=0.6792, over 7137.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6334, over 4636.31 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,759 INFO [trainer.py:765] (2/8) Epoch 8, batch 400, train_loss[loss=3.733, NarTop10Accuracy=0.5707, over 5076.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6319, over 5096.96 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,065 INFO [trainer.py:765] (2/8) Epoch 8, batch 500, train_loss[loss=3.877, NarTop10Accuracy=0.5398, over 6189.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6343, over 5397.83 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (2/8) Epoch 8, batch 600, train_loss[loss=3.282, NarTop10Accuracy=0.6803, over 5697.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6303, over 5662.16 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,499 INFO [trainer.py:765] (2/8) Epoch 8, batch 700, train_loss[loss=3.653, NarTop10Accuracy=0.5788, over 5208.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6289, over 5749.77 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (2/8) Epoch 8, batch 800, train_loss[loss=3.522, NarTop10Accuracy=0.6176, over 4368.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6308, over 5786.35 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 16:04:27,587 INFO [trainer.py:765] (2/8) Epoch 8, batch 900, train_loss[loss=3.259, NarTop10Accuracy=0.6728, over 6252.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6337, over 5794.24 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (2/8) Epoch 8, batch 1000, train_loss[loss=3.846, NarTop10Accuracy=0.5503, over 6768.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6364, over 5896.25 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:05:37,293 INFO [trainer.py:765] (2/8) Epoch 8, batch 1100, train_loss[loss=3.587, NarTop10Accuracy=0.6102, over 6732.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6387, over 5933.18 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,859 INFO [trainer.py:765] (2/8) Epoch 8, batch 1200, train_loss[loss=3.393, NarTop10Accuracy=0.6447, over 7011.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6361, over 5919.75 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (2/8) Epoch 8, batch 1300, train_loss[loss=3.268, NarTop10Accuracy=0.6839, over 5061.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6384, over 5984.27 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 16:07:24,235 INFO [trainer.py:765] (2/8) Epoch 8, batch 1400, train_loss[loss=3.407, NarTop10Accuracy=0.6464, over 6024.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6372, over 6008.49 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,168 INFO [trainer.py:765] (2/8) Epoch 8, batch 1500, train_loss[loss=3.41, NarTop10Accuracy=0.6464, over 6423.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.64, over 5941.71 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (2/8) Epoch 8, batch 1600, train_loss[loss=3.213, NarTop10Accuracy=0.6815, over 7410.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6404, over 5928.69 frames. ], batch size: 23, lr: 1.05e-02 +2024-08-06 16:08:46,617 INFO [trainer.py:765] (2/8) Epoch 8, batch 1700, train_loss[loss=3.392, NarTop10Accuracy=0.6457, over 6189.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6394, over 5917.59 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 16:09:13,105 INFO [trainer.py:765] (2/8) Epoch 8, batch 1800, train_loss[loss=3.384, NarTop10Accuracy=0.6599, over 7194.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6412, over 5977.79 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,635 INFO [trainer.py:765] (2/8) Epoch 8, batch 1900, train_loss[loss=3.787, NarTop10Accuracy=0.563, over 6129.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6423, over 6035.17 frames. ], batch size: 54, lr: 1.04e-02 +2024-08-06 16:09:56,939 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (2/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29439MB +2024-08-06 16:10:05,469 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,202 INFO [trainer.py:765] (2/8) Epoch 8, batch 2000, train_loss[loss=4, NarTop10Accuracy=0.5311, over 6636.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6414, over 6001.51 frames. ], batch size: 51, lr: 1.04e-02 +2024-08-06 16:10:38,513 INFO [trainer.py:765] (2/8) Epoch 8, batch 2100, train_loss[loss=3.04, NarTop10Accuracy=0.701, over 3867.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6431, over 5985.20 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,746 INFO [trainer.py:765] (2/8) Epoch 8, batch 2200, train_loss[loss=3.519, NarTop10Accuracy=0.6214, over 7326.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.641, over 6019.75 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,903 INFO [trainer.py:765] (2/8) Epoch 8, batch 2300, train_loss[loss=3.761, NarTop10Accuracy=0.565, over 5745.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.637, over 6012.04 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,091 INFO [trainer.py:765] (2/8) Epoch 8, batch 2400, train_loss[loss=3.303, NarTop10Accuracy=0.6556, over 5181.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6408, over 5771.45 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,443 INFO [trainer.py:765] (2/8) Epoch 8, batch 2500, train_loss[loss=3.398, NarTop10Accuracy=0.6403, over 5295.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6427, over 5463.72 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,206 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 16:13:37,514 INFO [trainer.py:765] (2/8) Epoch 9, batch 100, train_loss[loss=3.098, NarTop10Accuracy=0.7039, over 7470.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6514, over 2364.57 frames. ], batch size: 32, lr: 9.72e-03 +2024-08-06 16:14:14,440 INFO [trainer.py:765] (2/8) Epoch 9, batch 200, train_loss[loss=3.614, NarTop10Accuracy=0.5992, over 6843.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6522, over 3846.38 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,507 INFO [trainer.py:765] (2/8) Epoch 9, batch 300, train_loss[loss=3.404, NarTop10Accuracy=0.6464, over 6966.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6509, over 4649.89 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,914 INFO [trainer.py:765] (2/8) Epoch 9, batch 400, train_loss[loss=3.147, NarTop10Accuracy=0.6944, over 5256.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6524, over 5096.10 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,335 INFO [trainer.py:765] (2/8) Epoch 9, batch 500, train_loss[loss=2.965, NarTop10Accuracy=0.7181, over 6213.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.655, over 5388.85 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,972 INFO [trainer.py:765] (2/8) Epoch 9, batch 600, train_loss[loss=3.688, NarTop10Accuracy=0.5887, over 5703.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6567, over 5652.41 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,145 INFO [trainer.py:765] (2/8) Epoch 9, batch 700, train_loss[loss=3.254, NarTop10Accuracy=0.6749, over 5097.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6548, over 5715.28 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,051 INFO [trainer.py:765] (2/8) Epoch 9, batch 800, train_loss[loss=2.937, NarTop10Accuracy=0.7324, over 4335.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.649, over 5761.00 frames. ], batch size: 5, lr: 9.57e-03 +2024-08-06 16:18:07,815 INFO [trainer.py:765] (2/8) Epoch 9, batch 900, train_loss[loss=3.21, NarTop10Accuracy=0.6927, over 6300.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6512, over 5778.08 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,344 INFO [trainer.py:765] (2/8) Epoch 9, batch 1000, train_loss[loss=3.237, NarTop10Accuracy=0.6778, over 6663.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6476, over 5886.77 frames. ], batch size: 14, lr: 9.53e-03 +2024-08-06 16:19:15,381 INFO [trainer.py:765] (2/8) Epoch 9, batch 1100, train_loss[loss=3.446, NarTop10Accuracy=0.6364, over 6930.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6459, over 5927.57 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,877 INFO [trainer.py:765] (2/8) Epoch 9, batch 1200, train_loss[loss=3.873, NarTop10Accuracy=0.5367, over 7119.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6453, over 5930.61 frames. 
], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,906 INFO [trainer.py:765] (2/8) Epoch 9, batch 1300, train_loss[loss=3.075, NarTop10Accuracy=0.7022, over 4956.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6457, over 5992.94 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,579 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (2/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29439MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,690 INFO [trainer.py:765] (2/8) Epoch 9, batch 1400, train_loss[loss=3.605, NarTop10Accuracy=0.5912, over 6102.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6453, over 6017.86 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,895 INFO [trainer.py:765] (2/8) Epoch 9, batch 1500, train_loss[loss=3.331, NarTop10Accuracy=0.6623, over 5991.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6493, over 5947.74 frames. ], batch size: 50, lr: 9.42e-03 +2024-08-06 16:22:06,720 INFO [trainer.py:765] (2/8) Epoch 9, batch 1600, train_loss[loss=3.272, NarTop10Accuracy=0.667, over 7158.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6514, over 5947.62 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (2/8) Epoch 9, batch 1700, train_loss[loss=3.573, NarTop10Accuracy=0.6074, over 6597.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6475, over 5928.31 frames. ], batch size: 14, lr: 9.38e-03 +2024-08-06 16:23:00,063 INFO [trainer.py:765] (2/8) Epoch 9, batch 1800, train_loss[loss=3.124, NarTop10Accuracy=0.7072, over 7044.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6498, over 5995.89 frames. ], batch size: 22, lr: 9.36e-03 +2024-08-06 16:23:26,783 INFO [trainer.py:765] (2/8) Epoch 9, batch 1900, train_loss[loss=3.476, NarTop10Accuracy=0.6356, over 5694.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6478, over 6030.93 frames. ], batch size: 51, lr: 9.34e-03 +2024-08-06 16:23:52,485 INFO [trainer.py:765] (2/8) Epoch 9, batch 2000, train_loss[loss=3.901, NarTop10Accuracy=0.5528, over 6429.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6479, over 6003.25 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,965 INFO [trainer.py:765] (2/8) Epoch 9, batch 2100, train_loss[loss=3.114, NarTop10Accuracy=0.7111, over 4794.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6474, over 5984.90 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (2/8) Epoch 9, batch 2200, train_loss[loss=3.563, NarTop10Accuracy=0.6062, over 7080.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6457, over 6017.63 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,720 INFO [trainer.py:765] (2/8) Epoch 9, batch 2300, train_loss[loss=3.284, NarTop10Accuracy=0.6664, over 5733.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6438, over 6031.20 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,163 INFO [trainer.py:765] (2/8) Epoch 9, batch 2400, train_loss[loss=3.178, NarTop10Accuracy=0.6843, over 5025.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6457, over 5789.17 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,767 INFO [trainer.py:765] (2/8) Epoch 9, batch 2500, train_loss[loss=3.166, NarTop10Accuracy=0.6887, over 5109.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6517, over 5483.12 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,445 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 16:27:19,583 INFO [trainer.py:765] (2/8) Epoch 10, batch 100, train_loss[loss=3.171, NarTop10Accuracy=0.6944, over 7434.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6531, over 2376.66 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,627 INFO [trainer.py:765] (2/8) Epoch 10, batch 200, train_loss[loss=3.176, NarTop10Accuracy=0.6932, over 6945.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6563, over 3863.17 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,056 INFO [trainer.py:765] (2/8) Epoch 10, batch 300, train_loss[loss=3.096, NarTop10Accuracy=0.7083, over 7011.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6565, over 4655.11 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,199 INFO [trainer.py:765] (2/8) Epoch 10, batch 400, train_loss[loss=3.303, NarTop10Accuracy=0.6667, over 5241.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6581, over 5093.26 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,217 INFO [trainer.py:765] (2/8) Epoch 10, batch 500, train_loss[loss=3.024, NarTop10Accuracy=0.7325, over 6192.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6587, over 5384.92 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,764 INFO [trainer.py:765] (2/8) Epoch 10, batch 600, train_loss[loss=3.473, NarTop10Accuracy=0.6288, over 5679.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.657, over 5656.91 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,264 INFO [trainer.py:765] (2/8) Epoch 10, batch 700, train_loss[loss=3.377, NarTop10Accuracy=0.6489, over 5127.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6557, over 5718.01 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,842 INFO [trainer.py:765] (2/8) Epoch 10, batch 800, train_loss[loss=3.482, NarTop10Accuracy=0.6228, over 4317.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6538, over 5791.86 frames. ], batch size: 5, lr: 8.64e-03 +2024-08-06 16:31:16,256 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (2/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (2/8) Epoch 10, batch 900, train_loss[loss=3.262, NarTop10Accuracy=0.6703, over 6225.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.659, over 5797.12 frames. ], batch size: 13, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (2/8) Epoch 10, batch 1000, train_loss[loss=3.139, NarTop10Accuracy=0.7036, over 6198.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6571, over 5882.64 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (2/8) Epoch 10, batch 1100, train_loss[loss=3.122, NarTop10Accuracy=0.7081, over 6675.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.656, over 5931.93 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (2/8) Epoch 10, batch 1200, train_loss[loss=3.218, NarTop10Accuracy=0.6755, over 7344.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6582, over 5937.65 frames. ], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,170 INFO [trainer.py:765] (2/8) Epoch 10, batch 1300, train_loss[loss=3.144, NarTop10Accuracy=0.6946, over 5193.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6588, over 6000.12 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,201 INFO [trainer.py:765] (2/8) Epoch 10, batch 1400, train_loss[loss=3.408, NarTop10Accuracy=0.6516, over 6084.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6528, over 6006.16 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (2/8) Epoch 10, batch 1500, train_loss[loss=3.641, NarTop10Accuracy=0.5921, over 5775.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6561, over 5956.58 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (2/8) Epoch 10, batch 1600, train_loss[loss=3.681, NarTop10Accuracy=0.5796, over 7044.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6588, over 5920.95 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (2/8) Epoch 10, batch 1700, train_loss[loss=3.409, NarTop10Accuracy=0.6385, over 6246.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.657, over 5915.52 frames. ], batch size: 13, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (2/8) Epoch 10, batch 1800, train_loss[loss=3.193, NarTop10Accuracy=0.6854, over 7140.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6591, over 5976.02 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (2/8) Epoch 10, batch 1900, train_loss[loss=3.274, NarTop10Accuracy=0.6773, over 5433.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6599, over 6021.99 frames. ], batch size: 50, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (2/8) Epoch 10, batch 2000, train_loss[loss=3.182, NarTop10Accuracy=0.696, over 6018.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6622, over 5993.82 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (2/8) Epoch 10, batch 2100, train_loss[loss=3.488, NarTop10Accuracy=0.6282, over 4068.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6597, over 5958.03 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (2/8) Epoch 10, batch 2200, train_loss[loss=3.714, NarTop10Accuracy=0.5809, over 7443.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6583, over 5999.22 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (2/8) Epoch 10, batch 2300, train_loss[loss=3.281, NarTop10Accuracy=0.6707, over 5625.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6567, over 6011.96 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,005 INFO [trainer.py:765] (2/8) Epoch 10, batch 2400, train_loss[loss=3.314, NarTop10Accuracy=0.6661, over 5082.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6618, over 5761.29 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (2/8) Epoch 10, batch 2500, train_loss[loss=3.776, NarTop10Accuracy=0.5644, over 5172.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6669, over 5455.58 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,680 INFO [trainer.py:650] (2/8) Reaches end of dataloader. 
+2024-08-06 16:41:06,234 INFO [trainer.py:765] (2/8) Epoch 11, batch 100, train_loss[loss=3.64, NarTop10Accuracy=0.5948, over 7401.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6542, over 2357.55 frames. ], batch size: 32, lr: 7.97e-03 +2024-08-06 16:41:39,020 INFO [trainer.py:765] (2/8) Epoch 11, batch 200, train_loss[loss=3.69, NarTop10Accuracy=0.5708, over 6924.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6589, over 3842.15 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,189 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,975 INFO [trainer.py:765] (2/8) Epoch 11, batch 300, train_loss[loss=3.142, NarTop10Accuracy=0.7099, over 7149.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6646, over 4661.17 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,153 INFO [trainer.py:765] (2/8) Epoch 11, batch 400, train_loss[loss=3.284, NarTop10Accuracy=0.6671, over 5304.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6651, over 5126.48 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,718 INFO [trainer.py:765] (2/8) Epoch 11, batch 500, train_loss[loss=3.082, NarTop10Accuracy=0.7177, over 6081.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6666, over 5378.25 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,241 INFO [trainer.py:765] (2/8) Epoch 11, batch 600, train_loss[loss=3.5, NarTop10Accuracy=0.6222, over 5670.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6646, over 5647.51 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,715 INFO [trainer.py:765] (2/8) Epoch 11, batch 700, train_loss[loss=3.634, NarTop10Accuracy=0.5926, over 5106.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.666, over 5719.74 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,467 INFO [trainer.py:765] (2/8) Epoch 11, batch 800, train_loss[loss=3.169, NarTop10Accuracy=0.6987, over 5037.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6619, over 5772.58 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,456 INFO [trainer.py:765] (2/8) Epoch 11, batch 900, train_loss[loss=3.599, NarTop10Accuracy=0.596, over 6309.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6637, over 5787.45 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,310 INFO [trainer.py:765] (2/8) Epoch 11, batch 1000, train_loss[loss=3.376, NarTop10Accuracy=0.6457, over 6570.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6645, over 5907.05 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 16:46:53,456 INFO [trainer.py:765] (2/8) Epoch 11, batch 1100, train_loss[loss=3.053, NarTop10Accuracy=0.7184, over 6834.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6667, over 5921.86 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (2/8) Epoch 11, batch 1200, train_loss[loss=3.515, NarTop10Accuracy=0.6194, over 7188.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.665, over 5912.99 frames. 
], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,481 INFO [trainer.py:765] (2/8) Epoch 11, batch 1300, train_loss[loss=3.181, NarTop10Accuracy=0.6832, over 5274.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6629, over 5991.11 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (2/8) Epoch 11, batch 1400, train_loss[loss=3.364, NarTop10Accuracy=0.657, over 6081.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6592, over 6029.09 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,344 INFO [trainer.py:765] (2/8) Epoch 11, batch 1500, train_loss[loss=3.242, NarTop10Accuracy=0.6767, over 6225.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6599, over 5958.97 frames. ], batch size: 50, lr: 7.77e-03 +2024-08-06 16:49:37,102 INFO [trainer.py:765] (2/8) Epoch 11, batch 1600, train_loss[loss=3.319, NarTop10Accuracy=0.6611, over 7176.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6626, over 5924.50 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,791 INFO [trainer.py:765] (2/8) Epoch 11, batch 1700, train_loss[loss=3.383, NarTop10Accuracy=0.6411, over 6726.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6647, over 5916.83 frames. ], batch size: 14, lr: 7.74e-03 +2024-08-06 16:50:30,352 INFO [trainer.py:765] (2/8) Epoch 11, batch 1800, train_loss[loss=3.377, NarTop10Accuracy=0.6476, over 7323.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6621, over 5988.05 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,820 INFO [trainer.py:765] (2/8) Epoch 11, batch 1900, train_loss[loss=3.872, NarTop10Accuracy=0.5435, over 6246.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6603, over 6028.77 frames. ], batch size: 51, lr: 7.71e-03 +2024-08-06 16:51:22,404 INFO [trainer.py:765] (2/8) Epoch 11, batch 2000, train_loss[loss=3.816, NarTop10Accuracy=0.5611, over 5883.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.662, over 6022.26 frames. ], batch size: 51, lr: 7.70e-03 +2024-08-06 16:51:47,793 INFO [trainer.py:765] (2/8) Epoch 11, batch 2100, train_loss[loss=2.934, NarTop10Accuracy=0.7354, over 4833.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6632, over 5996.87 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 16:52:13,117 INFO [trainer.py:765] (2/8) Epoch 11, batch 2200, train_loss[loss=3.286, NarTop10Accuracy=0.6718, over 7359.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6635, over 6010.62 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,898 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,079 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (2/8) Epoch 11, batch 2300, train_loss[loss=3.245, NarTop10Accuracy=0.6779, over 5577.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6625, over 6014.71 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (2/8) Epoch 11, batch 2400, train_loss[loss=3.527, NarTop10Accuracy=0.6189, over 5307.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6647, over 5776.01 frames. 
], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,371 INFO [trainer.py:765] (2/8) Epoch 11, batch 2500, train_loss[loss=3.486, NarTop10Accuracy=0.6217, over 5127.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6658, over 5471.61 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,343 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 16:54:58,525 INFO [trainer.py:765] (2/8) Epoch 12, batch 100, train_loss[loss=3.66, NarTop10Accuracy=0.5895, over 6948.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6667, over 2373.39 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,432 INFO [trainer.py:765] (2/8) Epoch 12, batch 200, train_loss[loss=3.174, NarTop10Accuracy=0.6971, over 6849.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6732, over 3865.50 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,096 INFO [trainer.py:765] (2/8) Epoch 12, batch 300, train_loss[loss=2.981, NarTop10Accuracy=0.7338, over 7053.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6763, over 4651.69 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,426 INFO [trainer.py:765] (2/8) Epoch 12, batch 400, train_loss[loss=3.049, NarTop10Accuracy=0.7118, over 5211.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6732, over 5093.35 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,503 INFO [trainer.py:765] (2/8) Epoch 12, batch 500, train_loss[loss=3.655, NarTop10Accuracy=0.5957, over 6093.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6708, over 5367.25 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,484 INFO [trainer.py:765] (2/8) Epoch 12, batch 600, train_loss[loss=2.976, NarTop10Accuracy=0.7358, over 5763.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6723, over 5646.74 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,005 INFO [trainer.py:765] (2/8) Epoch 12, batch 700, train_loss[loss=3.76, NarTop10Accuracy=0.5692, over 5229.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.669, over 5716.16 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 16:58:53,469 INFO [trainer.py:765] (2/8) Epoch 12, batch 800, train_loss[loss=3.306, NarTop10Accuracy=0.6614, over 5043.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6668, over 5782.92 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,206 INFO [trainer.py:765] (2/8) Epoch 12, batch 900, train_loss[loss=3.132, NarTop10Accuracy=0.6954, over 6042.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6707, over 5792.02 frames. ], batch size: 13, lr: 7.20e-03 +2024-08-06 17:00:01,574 INFO [trainer.py:765] (2/8) Epoch 12, batch 1000, train_loss[loss=3.109, NarTop10Accuracy=0.7046, over 6186.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6686, over 5898.89 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 17:00:39,189 INFO [trainer.py:765] (2/8) Epoch 12, batch 1100, train_loss[loss=3.604, NarTop10Accuracy=0.6023, over 6804.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6656, over 5948.11 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,963 INFO [trainer.py:765] (2/8) Epoch 12, batch 1200, train_loss[loss=3.126, NarTop10Accuracy=0.7013, over 7071.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6724, over 5941.18 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,108 INFO [trainer.py:765] (2/8) Epoch 12, batch 1300, train_loss[loss=3.284, NarTop10Accuracy=0.6688, over 5049.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.669, over 6008.06 frames. 
], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,323 INFO [trainer.py:765] (2/8) Epoch 12, batch 1400, train_loss[loss=3.709, NarTop10Accuracy=0.5846, over 6234.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6679, over 6035.28 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (2/8) Epoch 12, batch 1500, train_loss[loss=3.418, NarTop10Accuracy=0.6441, over 5928.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6715, over 5978.85 frames. ], batch size: 51, lr: 7.13e-03 +2024-08-06 17:03:20,691 INFO [trainer.py:765] (2/8) Epoch 12, batch 1600, train_loss[loss=3.195, NarTop10Accuracy=0.6836, over 7197.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.67, over 5936.95 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,297 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (2/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,602 INFO [trainer.py:765] (2/8) Epoch 12, batch 1700, train_loss[loss=3.365, NarTop10Accuracy=0.6533, over 6132.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6692, over 5922.69 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,119 INFO [trainer.py:765] (2/8) Epoch 12, batch 1800, train_loss[loss=3.772, NarTop10Accuracy=0.5763, over 7302.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6699, over 5984.71 frames. ], batch size: 22, lr: 7.10e-03 +2024-08-06 17:04:48,590 INFO [trainer.py:765] (2/8) Epoch 12, batch 1900, train_loss[loss=3.222, NarTop10Accuracy=0.6813, over 6159.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6703, over 6014.28 frames. ], batch size: 52, lr: 7.08e-03 +2024-08-06 17:05:14,196 INFO [trainer.py:765] (2/8) Epoch 12, batch 2000, train_loss[loss=3.484, NarTop10Accuracy=0.6259, over 6438.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6726, over 6009.92 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 17:05:39,467 INFO [trainer.py:765] (2/8) Epoch 12, batch 2100, train_loss[loss=3.357, NarTop10Accuracy=0.6574, over 4797.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6714, over 5999.48 frames. ], batch size: 5, lr: 7.06e-03 +2024-08-06 17:06:04,689 INFO [trainer.py:765] (2/8) Epoch 12, batch 2200, train_loss[loss=3.445, NarTop10Accuracy=0.6362, over 7461.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6676, over 6032.09 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,846 INFO [trainer.py:765] (2/8) Epoch 12, batch 2300, train_loss[loss=3.446, NarTop10Accuracy=0.631, over 6225.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6688, over 6034.30 frames. ], batch size: 10, lr: 7.04e-03 +2024-08-06 17:06:54,199 INFO [trainer.py:765] (2/8) Epoch 12, batch 2400, train_loss[loss=3.218, NarTop10Accuracy=0.6906, over 5085.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6717, over 5774.91 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,645 INFO [trainer.py:765] (2/8) Epoch 12, batch 2500, train_loss[loss=3.245, NarTop10Accuracy=0.6787, over 5235.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6754, over 5485.59 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,546 INFO [trainer.py:650] (2/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,079 INFO [trainer.py:765] (2/8) Epoch 13, batch 100, train_loss[loss=3.054, NarTop10Accuracy=0.7246, over 7458.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6677, over 2376.43 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,120 INFO [trainer.py:765] (2/8) Epoch 13, batch 200, train_loss[loss=2.938, NarTop10Accuracy=0.7438, over 6852.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6685, over 3862.95 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,276 INFO [trainer.py:765] (2/8) Epoch 13, batch 300, train_loss[loss=3.509, NarTop10Accuracy=0.6166, over 7014.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6734, over 4651.97 frames. ], batch size: 22, lr: 6.71e-03 +2024-08-06 17:10:19,164 INFO [trainer.py:765] (2/8) Epoch 13, batch 400, train_loss[loss=2.915, NarTop10Accuracy=0.7455, over 5103.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6755, over 5103.00 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,336 INFO [trainer.py:765] (2/8) Epoch 13, batch 500, train_loss[loss=3.184, NarTop10Accuracy=0.6949, over 6237.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6783, over 5398.09 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,244 INFO [trainer.py:765] (2/8) Epoch 13, batch 600, train_loss[loss=2.991, NarTop10Accuracy=0.7232, over 5754.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6792, over 5662.55 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (2/8) Epoch 13, batch 700, train_loss[loss=3.259, NarTop10Accuracy=0.6698, over 5157.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6771, over 5724.27 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,442 INFO [trainer.py:765] (2/8) Epoch 13, batch 800, train_loss[loss=3.041, NarTop10Accuracy=0.7212, over 5016.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6757, over 5780.18 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 17:13:10,032 INFO [trainer.py:765] (2/8) Epoch 13, batch 900, train_loss[loss=3.235, NarTop10Accuracy=0.6824, over 6576.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6778, over 5792.83 frames. ], batch size: 14, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (2/8) Epoch 13, batch 1000, train_loss[loss=3.648, NarTop10Accuracy=0.5925, over 6675.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.677, over 5887.75 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,537 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (2/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:14:24,470 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,697 INFO [trainer.py:765] (2/8) Epoch 13, batch 1100, train_loss[loss=3.579, NarTop10Accuracy=0.6117, over 6747.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.675, over 5914.06 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,475 INFO [trainer.py:765] (2/8) Epoch 13, batch 1200, train_loss[loss=3.402, NarTop10Accuracy=0.6461, over 7275.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6745, over 5906.73 frames. 
], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (2/8) Epoch 13, batch 1300, train_loss[loss=3.165, NarTop10Accuracy=0.705, over 5106.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6736, over 5978.93 frames. ], batch size: 6, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (2/8) Epoch 13, batch 1400, train_loss[loss=3.148, NarTop10Accuracy=0.6944, over 6027.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6726, over 6002.90 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,787 INFO [trainer.py:765] (2/8) Epoch 13, batch 1500, train_loss[loss=3.493, NarTop10Accuracy=0.6257, over 6648.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6741, over 5954.51 frames. ], batch size: 50, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (2/8) Epoch 13, batch 1600, train_loss[loss=3.034, NarTop10Accuracy=0.7147, over 6969.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6732, over 5922.48 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (2/8) Epoch 13, batch 1700, train_loss[loss=3.185, NarTop10Accuracy=0.6916, over 6201.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6731, over 5915.81 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,761 INFO [trainer.py:765] (2/8) Epoch 13, batch 1800, train_loss[loss=3.11, NarTop10Accuracy=0.706, over 6915.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6754, over 5983.15 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (2/8) Epoch 13, batch 1900, train_loss[loss=3.563, NarTop10Accuracy=0.6137, over 5625.00 frames. ], tot_loss[loss=3.251, NarTop10Accuracy=0.6756, over 6021.57 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,777 INFO [trainer.py:765] (2/8) Epoch 13, batch 2000, train_loss[loss=3.581, NarTop10Accuracy=0.6056, over 5772.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6787, over 5989.22 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (2/8) Epoch 13, batch 2100, train_loss[loss=2.776, NarTop10Accuracy=0.7706, over 4866.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6793, over 5977.29 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (2/8) Epoch 13, batch 2200, train_loss[loss=3.467, NarTop10Accuracy=0.6339, over 7323.00 frames. ], tot_loss[loss=3.247, NarTop10Accuracy=0.6764, over 6034.04 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,542 INFO [trainer.py:765] (2/8) Epoch 13, batch 2300, train_loss[loss=3.622, NarTop10Accuracy=0.5986, over 5652.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.673, over 6043.13 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (2/8) Epoch 13, batch 2400, train_loss[loss=3.595, NarTop10Accuracy=0.6067, over 5721.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6792, over 5792.22 frames. ], batch size: 8, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (2/8) Epoch 13, batch 2500, train_loss[loss=3.529, NarTop10Accuracy=0.6231, over 5043.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6806, over 5490.73 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,079 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (2/8) Epoch 14, batch 100, train_loss[loss=3.057, NarTop10Accuracy=0.7174, over 7182.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6834, over 2358.00 frames. 
], batch size: 31, lr: 6.24e-03 +2024-08-06 17:22:50,378 INFO [trainer.py:765] (2/8) Epoch 14, batch 200, train_loss[loss=3.149, NarTop10Accuracy=0.7058, over 6723.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6782, over 3848.09 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,879 INFO [trainer.py:765] (2/8) Epoch 14, batch 300, train_loss[loss=3.199, NarTop10Accuracy=0.6858, over 7278.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6843, over 4662.98 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,484 INFO [trainer.py:765] (2/8) Epoch 14, batch 400, train_loss[loss=2.88, NarTop10Accuracy=0.742, over 5040.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.6804, over 5111.33 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,113 INFO [trainer.py:765] (2/8) Epoch 14, batch 500, train_loss[loss=3.355, NarTop10Accuracy=0.6582, over 6129.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6794, over 5376.10 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (2/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,276 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:24:44,823 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,914 INFO [trainer.py:765] (2/8) Epoch 14, batch 600, train_loss[loss=3.031, NarTop10Accuracy=0.7175, over 5781.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6784, over 5647.77 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,548 INFO [trainer.py:765] (2/8) Epoch 14, batch 700, train_loss[loss=3.293, NarTop10Accuracy=0.659, over 4980.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6808, over 5720.73 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,279 INFO [trainer.py:765] (2/8) Epoch 14, batch 800, train_loss[loss=3.068, NarTop10Accuracy=0.719, over 5076.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6842, over 5776.22 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 17:26:57,659 INFO [trainer.py:765] (2/8) Epoch 14, batch 900, train_loss[loss=3.34, NarTop10Accuracy=0.6505, over 6333.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6845, over 5795.83 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,716 INFO [trainer.py:765] (2/8) Epoch 14, batch 1000, train_loss[loss=3.404, NarTop10Accuracy=0.6487, over 6207.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6811, over 5905.89 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,597 INFO [trainer.py:765] (2/8) Epoch 14, batch 1100, train_loss[loss=3.001, NarTop10Accuracy=0.7232, over 6672.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6809, over 5931.86 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,733 INFO [trainer.py:765] (2/8) Epoch 14, batch 1200, train_loss[loss=3.527, NarTop10Accuracy=0.6177, over 7185.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6822, over 5934.10 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,214 INFO [trainer.py:765] (2/8) Epoch 14, batch 1300, train_loss[loss=3.481, NarTop10Accuracy=0.628, over 5079.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6812, over 5989.78 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,602 INFO [trainer.py:765] (2/8) Epoch 14, batch 1400, train_loss[loss=3.362, NarTop10Accuracy=0.6589, over 5994.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6799, over 6021.32 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (2/8) Epoch 14, batch 1500, train_loss[loss=3.784, NarTop10Accuracy=0.5708, over 6153.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6784, over 5957.38 frames. ], batch size: 51, lr: 6.12e-03 +2024-08-06 17:30:53,043 INFO [trainer.py:765] (2/8) Epoch 14, batch 1600, train_loss[loss=3.009, NarTop10Accuracy=0.7312, over 7350.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6809, over 5930.11 frames. ], batch size: 23, lr: 6.11e-03 +2024-08-06 17:31:19,728 INFO [trainer.py:765] (2/8) Epoch 14, batch 1700, train_loss[loss=3.075, NarTop10Accuracy=0.7117, over 6234.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6846, over 5920.44 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 17:31:46,289 INFO [trainer.py:765] (2/8) Epoch 14, batch 1800, train_loss[loss=3.052, NarTop10Accuracy=0.7193, over 6921.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6879, over 5987.01 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (2/8) Epoch 14, batch 1900, train_loss[loss=3.585, NarTop10Accuracy=0.6054, over 5889.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6842, over 6028.54 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,282 INFO [trainer.py:765] (2/8) Epoch 14, batch 2000, train_loss[loss=3.238, NarTop10Accuracy=0.6792, over 6078.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6819, over 6002.66 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (2/8) Epoch 14, batch 2100, train_loss[loss=2.865, NarTop10Accuracy=0.7557, over 3945.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.681, over 5959.87 frames. ], batch size: 4, lr: 6.07e-03 +2024-08-06 17:33:28,999 INFO [trainer.py:765] (2/8) Epoch 14, batch 2200, train_loss[loss=3.18, NarTop10Accuracy=0.6907, over 7398.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6821, over 6006.60 frames. ], batch size: 31, lr: 6.06e-03 +2024-08-06 17:33:54,087 INFO [trainer.py:765] (2/8) Epoch 14, batch 2300, train_loss[loss=3.03, NarTop10Accuracy=0.7266, over 5763.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6787, over 6018.58 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,535 INFO [trainer.py:765] (2/8) Epoch 14, batch 2400, train_loss[loss=3.041, NarTop10Accuracy=0.7326, over 5109.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6791, over 5768.80 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (2/8) Epoch 14, batch 2500, train_loss[loss=2.971, NarTop10Accuracy=0.7267, over 4989.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6856, over 5492.97 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,395 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (2/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,835 INFO [trainer.py:650] (2/8) Reaches end of dataloader. 
+2024-08-06 17:36:11,739 INFO [trainer.py:765] (2/8) Epoch 15, batch 100, train_loss[loss=3.024, NarTop10Accuracy=0.7247, over 7095.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6828, over 2364.64 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (2/8) Epoch 15, batch 200, train_loss[loss=3.464, NarTop10Accuracy=0.619, over 6843.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6873, over 3861.10 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,715 INFO [trainer.py:765] (2/8) Epoch 15, batch 300, train_loss[loss=3.251, NarTop10Accuracy=0.6644, over 6972.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6868, over 4670.64 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,904 INFO [trainer.py:765] (2/8) Epoch 15, batch 400, train_loss[loss=2.939, NarTop10Accuracy=0.7421, over 5112.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6885, over 5121.35 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (2/8) Epoch 15, batch 500, train_loss[loss=2.949, NarTop10Accuracy=0.7395, over 6210.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6889, over 5392.92 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,094 INFO [trainer.py:765] (2/8) Epoch 15, batch 600, train_loss[loss=2.877, NarTop10Accuracy=0.7352, over 5715.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6857, over 5640.80 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (2/8) Epoch 15, batch 700, train_loss[loss=2.818, NarTop10Accuracy=0.7607, over 5169.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6847, over 5722.14 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,565 INFO [trainer.py:765] (2/8) Epoch 15, batch 800, train_loss[loss=3.288, NarTop10Accuracy=0.6699, over 4227.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.68, over 5782.49 frames. ], batch size: 5, lr: 5.76e-03 +2024-08-06 17:40:35,791 INFO [trainer.py:765] (2/8) Epoch 15, batch 900, train_loss[loss=3.471, NarTop10Accuracy=0.6213, over 6252.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6845, over 5805.97 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,251 INFO [trainer.py:765] (2/8) Epoch 15, batch 1000, train_loss[loss=3.182, NarTop10Accuracy=0.6959, over 6258.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6863, over 5905.07 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 17:41:46,452 INFO [trainer.py:765] (2/8) Epoch 15, batch 1100, train_loss[loss=3.184, NarTop10Accuracy=0.6961, over 6879.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6865, over 5936.53 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (2/8) Epoch 15, batch 1200, train_loss[loss=3.358, NarTop10Accuracy=0.6594, over 7188.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6802, over 5927.74 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,428 INFO [trainer.py:765] (2/8) Epoch 15, batch 1300, train_loss[loss=2.918, NarTop10Accuracy=0.7297, over 5028.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6847, over 5998.60 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (2/8) Epoch 15, batch 1400, train_loss[loss=3.345, NarTop10Accuracy=0.6584, over 6108.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6823, over 6015.64 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (2/8) Epoch 15, batch 1500, train_loss[loss=3.188, NarTop10Accuracy=0.6949, over 5640.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6813, over 5955.51 frames. ], batch size: 50, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (2/8) Epoch 15, batch 1600, train_loss[loss=3.574, NarTop10Accuracy=0.6004, over 7089.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6855, over 5935.59 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (2/8) Epoch 15, batch 1700, train_loss[loss=2.986, NarTop10Accuracy=0.7221, over 6648.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6883, over 5913.57 frames. ], batch size: 14, lr: 5.70e-03 +2024-08-06 17:45:17,294 INFO [trainer.py:765] (2/8) Epoch 15, batch 1800, train_loss[loss=3.253, NarTop10Accuracy=0.6725, over 7161.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6884, over 5993.56 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (2/8) Epoch 15, batch 1900, train_loss[loss=3.112, NarTop10Accuracy=0.7068, over 6603.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6839, over 6023.54 frames. ], batch size: 52, lr: 5.68e-03 +2024-08-06 17:45:53,541 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:46:01,743 INFO [trainer.py:811] (2/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,371 INFO [trainer.py:765] (2/8) Epoch 15, batch 2000, train_loss[loss=3.227, NarTop10Accuracy=0.6779, over 6447.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6843, over 5982.31 frames. ], batch size: 51, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (2/8) Epoch 15, batch 2100, train_loss[loss=3.027, NarTop10Accuracy=0.7203, over 3993.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6861, over 5957.40 frames. ], batch size: 4, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (2/8) Epoch 15, batch 2200, train_loss[loss=3.071, NarTop10Accuracy=0.7199, over 7440.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6856, over 5990.05 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (2/8) Epoch 15, batch 2300, train_loss[loss=3.518, NarTop10Accuracy=0.6198, over 5685.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6837, over 6009.55 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (2/8) Epoch 15, batch 2400, train_loss[loss=3.16, NarTop10Accuracy=0.6872, over 5130.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6887, over 5753.09 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,162 INFO [trainer.py:765] (2/8) Epoch 15, batch 2500, train_loss[loss=3.004, NarTop10Accuracy=0.7217, over 5283.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6928, over 5471.66 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:41,285 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 17:49:41,221 INFO [trainer.py:765] (2/8) Epoch 16, batch 100, train_loss[loss=3.538, NarTop10Accuracy=0.6179, over 7527.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6934, over 2372.92 frames. 
], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,157 INFO [trainer.py:765] (2/8) Epoch 16, batch 200, train_loss[loss=2.935, NarTop10Accuracy=0.7384, over 6726.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6838, over 3845.15 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,159 INFO [trainer.py:765] (2/8) Epoch 16, batch 300, train_loss[loss=3.156, NarTop10Accuracy=0.7001, over 7128.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6865, over 4635.07 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,976 INFO [trainer.py:765] (2/8) Epoch 16, batch 400, train_loss[loss=3.483, NarTop10Accuracy=0.6231, over 5154.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6857, over 5078.40 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,323 INFO [trainer.py:765] (2/8) Epoch 16, batch 500, train_loss[loss=3.04, NarTop10Accuracy=0.7189, over 6219.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6884, over 5359.95 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,251 INFO [trainer.py:765] (2/8) Epoch 16, batch 600, train_loss[loss=2.854, NarTop10Accuracy=0.7486, over 5712.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6857, over 5643.87 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,386 INFO [trainer.py:765] (2/8) Epoch 16, batch 700, train_loss[loss=2.916, NarTop10Accuracy=0.7607, over 4305.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6867, over 5721.16 frames. ], batch size: 5, lr: 5.41e-03 +2024-08-06 17:53:33,815 INFO [trainer.py:765] (2/8) Epoch 16, batch 800, train_loss[loss=3.273, NarTop10Accuracy=0.6716, over 5190.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6887, over 5772.38 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,922 INFO [trainer.py:765] (2/8) Epoch 16, batch 900, train_loss[loss=3.462, NarTop10Accuracy=0.6295, over 6531.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6917, over 5801.06 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:54:37,607 INFO [trainer.py:765] (2/8) Epoch 16, batch 1000, train_loss[loss=3.016, NarTop10Accuracy=0.7211, over 6669.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6922, over 5905.70 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:55:17,196 INFO [trainer.py:765] (2/8) Epoch 16, batch 1100, train_loss[loss=3.198, NarTop10Accuracy=0.6918, over 6765.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.687, over 5942.20 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,209 INFO [trainer.py:765] (2/8) Epoch 16, batch 1200, train_loss[loss=3.472, NarTop10Accuracy=0.63, over 7284.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6859, over 5930.31 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,775 INFO [trainer.py:765] (2/8) Epoch 16, batch 1300, train_loss[loss=3.454, NarTop10Accuracy=0.6346, over 5025.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6874, over 6002.63 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,648 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (2/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (2/8) Epoch 16, batch 1400, train_loss[loss=3.14, NarTop10Accuracy=0.6952, over 6084.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6884, over 6018.61 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,033 INFO [trainer.py:765] (2/8) Epoch 16, batch 1500, train_loss[loss=3.358, NarTop10Accuracy=0.6583, over 6501.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6887, over 5943.09 frames. ], batch size: 51, lr: 5.35e-03 +2024-08-06 17:58:01,774 INFO [trainer.py:765] (2/8) Epoch 16, batch 1600, train_loss[loss=3.066, NarTop10Accuracy=0.7158, over 7119.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6898, over 5928.23 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (2/8) Epoch 16, batch 1700, train_loss[loss=2.998, NarTop10Accuracy=0.7302, over 6255.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.687, over 5925.14 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 17:58:54,975 INFO [trainer.py:765] (2/8) Epoch 16, batch 1800, train_loss[loss=3.154, NarTop10Accuracy=0.6926, over 7515.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6905, over 5999.60 frames. ], batch size: 23, lr: 5.33e-03 +2024-08-06 17:59:21,359 INFO [trainer.py:765] (2/8) Epoch 16, batch 1900, train_loss[loss=3.384, NarTop10Accuracy=0.6452, over 6078.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6846, over 6049.91 frames. ], batch size: 50, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (2/8) Epoch 16, batch 2000, train_loss[loss=3.071, NarTop10Accuracy=0.7184, over 5763.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6904, over 6011.37 frames. ], batch size: 53, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (2/8) Epoch 16, batch 2100, train_loss[loss=3.368, NarTop10Accuracy=0.644, over 3897.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6854, over 5965.51 frames. ], batch size: 4, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (2/8) Epoch 16, batch 2200, train_loss[loss=3.352, NarTop10Accuracy=0.6619, over 7392.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6832, over 6006.60 frames. ], batch size: 32, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (2/8) Epoch 16, batch 2300, train_loss[loss=2.976, NarTop10Accuracy=0.7234, over 5709.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6829, over 6002.69 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (2/8) Epoch 16, batch 2400, train_loss[loss=3.05, NarTop10Accuracy=0.716, over 5145.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6871, over 5778.23 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (2/8) Epoch 16, batch 2500, train_loss[loss=3.034, NarTop10Accuracy=0.724, over 5046.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6937, over 5473.81 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,589 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (2/8) Epoch 17, batch 100, train_loss[loss=3.2, NarTop10Accuracy=0.687, over 7176.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6998, over 2358.63 frames. 
], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (2/8) Epoch 17, batch 200, train_loss[loss=3.402, NarTop10Accuracy=0.6428, over 6906.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6972, over 3855.26 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,590 INFO [trainer.py:765] (2/8) Epoch 17, batch 300, train_loss[loss=3.359, NarTop10Accuracy=0.6534, over 7221.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6928, over 4656.74 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (2/8) Epoch 17, batch 400, train_loss[loss=3.423, NarTop10Accuracy=0.6355, over 5070.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6914, over 5127.32 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (2/8) Epoch 17, batch 500, train_loss[loss=2.908, NarTop10Accuracy=0.7435, over 6006.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6961, over 5393.74 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (2/8) Epoch 17, batch 600, train_loss[loss=3.089, NarTop10Accuracy=0.7023, over 5847.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6933, over 5652.21 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (2/8) Epoch 17, batch 700, train_loss[loss=3.083, NarTop10Accuracy=0.7142, over 5199.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6924, over 5729.84 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,724 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (2/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,764 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,353 INFO [trainer.py:765] (2/8) Epoch 17, batch 800, train_loss[loss=3.115, NarTop10Accuracy=0.7006, over 5178.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6886, over 5777.16 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (2/8) Epoch 17, batch 900, train_loss[loss=3.567, NarTop10Accuracy=0.6087, over 6153.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6937, over 5801.26 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (2/8) Epoch 17, batch 1000, train_loss[loss=3.227, NarTop10Accuracy=0.6804, over 6096.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6925, over 5900.33 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (2/8) Epoch 17, batch 1100, train_loss[loss=2.957, NarTop10Accuracy=0.7391, over 6756.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6898, over 5930.01 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (2/8) Epoch 17, batch 1200, train_loss[loss=3.012, NarTop10Accuracy=0.7299, over 7305.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 5927.33 frames. ], batch size: 32, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (2/8) Epoch 17, batch 1300, train_loss[loss=3.312, NarTop10Accuracy=0.6627, over 4155.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 5981.05 frames. 
], batch size: 5, lr: 5.05e-03 +2024-08-06 18:10:48,026 INFO [trainer.py:765] (2/8) Epoch 17, batch 1400, train_loss[loss=3.379, NarTop10Accuracy=0.6534, over 6129.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6904, over 5984.87 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (2/8) Epoch 17, batch 1500, train_loss[loss=3.483, NarTop10Accuracy=0.6374, over 6618.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6932, over 5942.80 frames. ], batch size: 51, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (2/8) Epoch 17, batch 1600, train_loss[loss=3.088, NarTop10Accuracy=0.7093, over 6855.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6947, over 5923.27 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,508 INFO [trainer.py:765] (2/8) Epoch 17, batch 1700, train_loss[loss=3.545, NarTop10Accuracy=0.6153, over 6312.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6914, over 5904.41 frames. ], batch size: 13, lr: 5.03e-03 +2024-08-06 18:12:40,001 INFO [trainer.py:765] (2/8) Epoch 17, batch 1800, train_loss[loss=2.897, NarTop10Accuracy=0.7519, over 7104.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6884, over 5973.33 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (2/8) Epoch 17, batch 1900, train_loss[loss=3.073, NarTop10Accuracy=0.7196, over 6102.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6866, over 6023.55 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (2/8) Epoch 17, batch 2000, train_loss[loss=3.646, NarTop10Accuracy=0.5894, over 5982.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6911, over 5996.77 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (2/8) Epoch 17, batch 2100, train_loss[loss=2.976, NarTop10Accuracy=0.7263, over 4731.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6908, over 5987.46 frames. ], batch size: 5, lr: 5.00e-03 +2024-08-06 18:14:22,434 INFO [trainer.py:765] (2/8) Epoch 17, batch 2200, train_loss[loss=2.944, NarTop10Accuracy=0.7468, over 7329.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6869, over 6022.52 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (2/8) Epoch 17, batch 2300, train_loss[loss=3.019, NarTop10Accuracy=0.7262, over 5655.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6884, over 6016.69 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (2/8) Epoch 17, batch 2400, train_loss[loss=3.022, NarTop10Accuracy=0.7264, over 5070.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6895, over 5779.31 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (2/8) Epoch 17, batch 2500, train_loss[loss=2.836, NarTop10Accuracy=0.7633, over 5094.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6915, over 5488.00 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:56,062 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 18:16:49,908 INFO [trainer.py:765] (2/8) Epoch 18, batch 100, train_loss[loss=3.028, NarTop10Accuracy=0.718, over 7395.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6905, over 2373.08 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (2/8) Epoch 18, batch 200, train_loss[loss=2.953, NarTop10Accuracy=0.7342, over 6801.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6936, over 3869.50 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,717 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 18:17:35,927 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 18:17:36,529 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (2/8) Epoch 18, batch 300, train_loss[loss=3.455, NarTop10Accuracy=0.6298, over 7044.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6921, over 4655.92 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (2/8) Epoch 18, batch 400, train_loss[loss=3.148, NarTop10Accuracy=0.6935, over 5154.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6957, over 5091.61 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (2/8) Epoch 18, batch 500, train_loss[loss=3.057, NarTop10Accuracy=0.7148, over 6216.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6947, over 5380.84 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (2/8) Epoch 18, batch 600, train_loss[loss=3.406, NarTop10Accuracy=0.6412, over 6192.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6941, over 5640.78 frames. ], batch size: 10, lr: 4.80e-03 +2024-08-06 18:20:23,869 INFO [trainer.py:765] (2/8) Epoch 18, batch 700, train_loss[loss=3.484, NarTop10Accuracy=0.6185, over 5070.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6924, over 5712.17 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (2/8) Epoch 18, batch 800, train_loss[loss=2.785, NarTop10Accuracy=0.7698, over 5121.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5779.49 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,409 INFO [trainer.py:765] (2/8) Epoch 18, batch 900, train_loss[loss=3.014, NarTop10Accuracy=0.7252, over 6213.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6959, over 5816.96 frames. ], batch size: 13, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (2/8) Epoch 18, batch 1000, train_loss[loss=2.918, NarTop10Accuracy=0.7442, over 6342.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6935, over 5904.50 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (2/8) Epoch 18, batch 1100, train_loss[loss=3.456, NarTop10Accuracy=0.6318, over 6942.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6931, over 5924.61 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (2/8) Epoch 18, batch 1200, train_loss[loss=3.439, NarTop10Accuracy=0.628, over 7272.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6906, over 5921.87 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (2/8) Epoch 18, batch 1300, train_loss[loss=2.836, NarTop10Accuracy=0.7669, over 5112.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.695, over 5982.11 frames. ], batch size: 6, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (2/8) Epoch 18, batch 1400, train_loss[loss=2.991, NarTop10Accuracy=0.727, over 5925.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6937, over 5988.87 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (2/8) Epoch 18, batch 1500, train_loss[loss=3.109, NarTop10Accuracy=0.7103, over 6240.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6941, over 5942.82 frames. ], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (2/8) Epoch 18, batch 1600, train_loss[loss=3, NarTop10Accuracy=0.7244, over 7170.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6935, over 5928.46 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,687 INFO [trainer.py:765] (2/8) Epoch 18, batch 1700, train_loss[loss=3.042, NarTop10Accuracy=0.7177, over 6642.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6932, over 5894.36 frames. ], batch size: 14, lr: 4.75e-03 +2024-08-06 18:26:21,196 INFO [trainer.py:765] (2/8) Epoch 18, batch 1800, train_loss[loss=3.384, NarTop10Accuracy=0.6476, over 7146.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6956, over 5961.74 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (2/8) Epoch 18, batch 1900, train_loss[loss=3.087, NarTop10Accuracy=0.715, over 6321.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6933, over 6015.54 frames. ], batch size: 51, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (2/8) Epoch 18, batch 2000, train_loss[loss=3.064, NarTop10Accuracy=0.7105, over 6030.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 5995.48 frames. ], batch size: 50, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (2/8) Epoch 18, batch 2100, train_loss[loss=3.227, NarTop10Accuracy=0.6779, over 4854.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6949, over 5955.49 frames. ], batch size: 5, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (2/8) Epoch 18, batch 2200, train_loss[loss=3.023, NarTop10Accuracy=0.7234, over 7086.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6947, over 5987.32 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 18:28:06,571 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 18:28:14,650 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,096 INFO [trainer.py:765] (2/8) Epoch 18, batch 2300, train_loss[loss=2.806, NarTop10Accuracy=0.7563, over 5691.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6921, over 6016.71 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (2/8) Epoch 18, batch 2400, train_loss[loss=3.039, NarTop10Accuracy=0.7143, over 4935.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6971, over 5754.38 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (2/8) Epoch 18, batch 2500, train_loss[loss=2.964, NarTop10Accuracy=0.7295, over 5124.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7011, over 5453.51 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,351 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 18:30:41,232 INFO [trainer.py:765] (2/8) Epoch 19, batch 100, train_loss[loss=2.906, NarTop10Accuracy=0.745, over 7179.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.695, over 2351.38 frames. 
], batch size: 31, lr: 4.57e-03 +2024-08-06 18:31:15,603 INFO [trainer.py:765] (2/8) Epoch 19, batch 200, train_loss[loss=2.936, NarTop10Accuracy=0.7426, over 6861.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6956, over 3844.92 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (2/8) Epoch 19, batch 300, train_loss[loss=3.535, NarTop10Accuracy=0.6219, over 7215.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6987, over 4643.28 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (2/8) Epoch 19, batch 400, train_loss[loss=3.217, NarTop10Accuracy=0.6791, over 5220.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6991, over 5097.89 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (2/8) Epoch 19, batch 500, train_loss[loss=3.09, NarTop10Accuracy=0.7109, over 6156.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6989, over 5388.63 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (2/8) Epoch 19, batch 600, train_loss[loss=3.025, NarTop10Accuracy=0.7159, over 5748.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6974, over 5660.39 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (2/8) Epoch 19, batch 700, train_loss[loss=2.845, NarTop10Accuracy=0.7621, over 5091.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6968, over 5708.00 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (2/8) Epoch 19, batch 800, train_loss[loss=3.175, NarTop10Accuracy=0.6923, over 5022.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.695, over 5776.14 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (2/8) Epoch 19, batch 900, train_loss[loss=2.759, NarTop10Accuracy=0.7702, over 6177.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6969, over 5791.03 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (2/8) Epoch 19, batch 1000, train_loss[loss=3.309, NarTop10Accuracy=0.6617, over 6180.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6969, over 5900.25 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,939 INFO [trainer.py:765] (2/8) Epoch 19, batch 1100, train_loss[loss=3.03, NarTop10Accuracy=0.7204, over 6789.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6937, over 5935.31 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (2/8) Epoch 19, batch 1200, train_loss[loss=2.964, NarTop10Accuracy=0.735, over 7212.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6918, over 5942.96 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (2/8) Epoch 19, batch 1300, train_loss[loss=2.816, NarTop10Accuracy=0.7683, over 4254.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6926, over 6008.80 frames. ], batch size: 5, lr: 4.51e-03 +2024-08-06 18:38:04,679 INFO [trainer.py:765] (2/8) Epoch 19, batch 1400, train_loss[loss=3.088, NarTop10Accuracy=0.7157, over 6078.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.692, over 6024.58 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (2/8) Epoch 19, batch 1500, train_loss[loss=3.443, NarTop10Accuracy=0.6398, over 6699.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6973, over 5961.24 frames. 
], batch size: 55, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (2/8) Epoch 19, batch 1600, train_loss[loss=3.462, NarTop10Accuracy=0.6333, over 7092.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6969, over 5942.46 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,591 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (2/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29440MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (2/8) Epoch 19, batch 1700, train_loss[loss=3.619, NarTop10Accuracy=0.5964, over 6330.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6974, over 5925.62 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (2/8) Epoch 19, batch 1800, train_loss[loss=3.529, NarTop10Accuracy=0.6128, over 7059.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6974, over 5976.41 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (2/8) Epoch 19, batch 1900, train_loss[loss=3.131, NarTop10Accuracy=0.7093, over 5901.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.697, over 6016.48 frames. ], batch size: 51, lr: 4.49e-03 +2024-08-06 18:40:55,794 INFO [trainer.py:765] (2/8) Epoch 19, batch 2000, train_loss[loss=3.33, NarTop10Accuracy=0.6593, over 5748.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6971, over 5994.50 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (2/8) Epoch 19, batch 2100, train_loss[loss=2.941, NarTop10Accuracy=0.7446, over 4716.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6984, over 5956.37 frames. ], batch size: 5, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (2/8) Epoch 19, batch 2200, train_loss[loss=3.19, NarTop10Accuracy=0.6887, over 7308.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6966, over 5996.84 frames. ], batch size: 32, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (2/8) Epoch 19, batch 2300, train_loss[loss=3.077, NarTop10Accuracy=0.7108, over 5628.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6934, over 5997.50 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,987 INFO [trainer.py:765] (2/8) Epoch 19, batch 2400, train_loss[loss=3.004, NarTop10Accuracy=0.7179, over 5031.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6969, over 5768.66 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (2/8) Epoch 19, batch 2500, train_loss[loss=2.895, NarTop10Accuracy=0.7489, over 5199.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.701, over 5485.06 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,650 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 18:44:22,974 INFO [trainer.py:765] (2/8) Epoch 20, batch 100, train_loss[loss=3.269, NarTop10Accuracy=0.6681, over 7140.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6948, over 2365.79 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (2/8) Epoch 20, batch 200, train_loss[loss=3.482, NarTop10Accuracy=0.6237, over 6903.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7017, over 3870.31 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,279 INFO [trainer.py:765] (2/8) Epoch 20, batch 300, train_loss[loss=3.402, NarTop10Accuracy=0.6412, over 6990.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 4680.65 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (2/8) Epoch 20, batch 400, train_loss[loss=2.873, NarTop10Accuracy=0.752, over 5196.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.703, over 5127.09 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,770 INFO [trainer.py:765] (2/8) Epoch 20, batch 500, train_loss[loss=2.791, NarTop10Accuracy=0.77, over 6297.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7003, over 5410.01 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (2/8) Epoch 20, batch 600, train_loss[loss=3.083, NarTop10Accuracy=0.7036, over 5814.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7009, over 5665.04 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (2/8) Epoch 20, batch 700, train_loss[loss=2.755, NarTop10Accuracy=0.7759, over 5040.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7037, over 5724.20 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (2/8) Epoch 20, batch 800, train_loss[loss=2.781, NarTop10Accuracy=0.7747, over 5217.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7001, over 5785.16 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:56,535 INFO [trainer.py:765] (2/8) Epoch 20, batch 900, train_loss[loss=2.932, NarTop10Accuracy=0.749, over 6312.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7004, over 5810.16 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (2/8) Epoch 20, batch 1000, train_loss[loss=3.186, NarTop10Accuracy=0.6926, over 6630.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5911.04 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,237 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (2/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,328 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,426 INFO [trainer.py:765] (2/8) Epoch 20, batch 1100, train_loss[loss=3.311, NarTop10Accuracy=0.6618, over 6630.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6969, over 5927.58 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (2/8) Epoch 20, batch 1200, train_loss[loss=3.019, NarTop10Accuracy=0.7269, over 7332.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6953, over 5920.22 frames. ], batch size: 31, lr: 4.29e-03 +2024-08-06 18:51:25,129 INFO [trainer.py:765] (2/8) Epoch 20, batch 1300, train_loss[loss=3.327, NarTop10Accuracy=0.6538, over 5088.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6984, over 5988.77 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,314 INFO [trainer.py:765] (2/8) Epoch 20, batch 1400, train_loss[loss=3.047, NarTop10Accuracy=0.7293, over 6177.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7007, over 6002.73 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,805 INFO [trainer.py:765] (2/8) Epoch 20, batch 1500, train_loss[loss=3.209, NarTop10Accuracy=0.687, over 6327.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6984, over 5934.76 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (2/8) Epoch 20, batch 1600, train_loss[loss=2.898, NarTop10Accuracy=0.752, over 6975.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6966, over 5911.78 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (2/8) Epoch 20, batch 1700, train_loss[loss=3.583, NarTop10Accuracy=0.6107, over 6207.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6967, over 5908.28 frames. ], batch size: 13, lr: 4.27e-03 +2024-08-06 18:53:53,850 INFO [trainer.py:765] (2/8) Epoch 20, batch 1800, train_loss[loss=3.062, NarTop10Accuracy=0.718, over 7026.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6992, over 5983.12 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,315 INFO [trainer.py:765] (2/8) Epoch 20, batch 1900, train_loss[loss=3.102, NarTop10Accuracy=0.7055, over 6123.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6932, over 6030.60 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (2/8) Epoch 20, batch 2000, train_loss[loss=3.582, NarTop10Accuracy=0.6048, over 6273.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.692, over 6016.25 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (2/8) Epoch 20, batch 2100, train_loss[loss=3.348, NarTop10Accuracy=0.6705, over 4800.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6946, over 5969.45 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (2/8) Epoch 20, batch 2200, train_loss[loss=2.937, NarTop10Accuracy=0.7387, over 7092.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6926, over 6011.53 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,635 INFO [trainer.py:765] (2/8) Epoch 20, batch 2300, train_loss[loss=3.273, NarTop10Accuracy=0.675, over 5856.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6919, over 6024.86 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,049 INFO [trainer.py:765] (2/8) Epoch 20, batch 2400, train_loss[loss=2.874, NarTop10Accuracy=0.7553, over 5121.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6954, over 5777.39 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (2/8) Epoch 20, batch 2500, train_loss[loss=2.987, NarTop10Accuracy=0.7314, over 5784.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7024, over 5490.10 frames. ], batch size: 8, lr: 4.24e-03 +2024-08-06 18:57:09,627 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (2/8) Epoch 21, batch 100, train_loss[loss=3.201, NarTop10Accuracy=0.6839, over 7479.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7054, over 2377.69 frames. ], batch size: 32, lr: 4.13e-03 +2024-08-06 18:58:40,417 INFO [trainer.py:765] (2/8) Epoch 21, batch 200, train_loss[loss=2.819, NarTop10Accuracy=0.7715, over 6837.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7019, over 3866.70 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (2/8) Epoch 21, batch 300, train_loss[loss=2.941, NarTop10Accuracy=0.7416, over 6909.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7002, over 4662.67 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (2/8) Epoch 21, batch 400, train_loss[loss=2.794, NarTop10Accuracy=0.7678, over 5076.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7029, over 5126.11 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,839 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (2/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,891 INFO [trainer.py:765] (2/8) Epoch 21, batch 500, train_loss[loss=2.8, NarTop10Accuracy=0.7677, over 6108.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7021, over 5389.39 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,329 INFO [trainer.py:765] (2/8) Epoch 21, batch 600, train_loss[loss=3.519, NarTop10Accuracy=0.6189, over 5715.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7043, over 5668.98 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (2/8) Epoch 21, batch 700, train_loss[loss=2.631, NarTop10Accuracy=0.7974, over 5052.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7023, over 5737.46 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (2/8) Epoch 21, batch 800, train_loss[loss=3.149, NarTop10Accuracy=0.7055, over 5004.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6997, over 5794.79 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (2/8) Epoch 21, batch 900, train_loss[loss=3.012, NarTop10Accuracy=0.7292, over 6117.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7, over 5812.32 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (2/8) Epoch 21, batch 1000, train_loss[loss=2.982, NarTop10Accuracy=0.7301, over 6603.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6993, over 5917.86 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:04:07,207 INFO [trainer.py:765] (2/8) Epoch 21, batch 1100, train_loss[loss=3.385, NarTop10Accuracy=0.6479, over 6732.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6954, over 5966.31 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,463 INFO [trainer.py:765] (2/8) Epoch 21, batch 1200, train_loss[loss=3.287, NarTop10Accuracy=0.6652, over 7278.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6992, over 5957.18 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (2/8) Epoch 21, batch 1300, train_loss[loss=2.782, NarTop10Accuracy=0.7643, over 4977.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7028, over 6005.05 frames. ], batch size: 6, lr: 4.08e-03 +2024-08-06 19:05:55,560 INFO [trainer.py:765] (2/8) Epoch 21, batch 1400, train_loss[loss=3.442, NarTop10Accuracy=0.6309, over 6108.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7025, over 6030.22 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,600 INFO [trainer.py:765] (2/8) Epoch 21, batch 1500, train_loss[loss=3.294, NarTop10Accuracy=0.6713, over 6627.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6993, over 5951.06 frames. 
], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,462 INFO [trainer.py:765] (2/8) Epoch 21, batch 1600, train_loss[loss=2.907, NarTop10Accuracy=0.7476, over 7128.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6991, over 5925.02 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,212 INFO [trainer.py:765] (2/8) Epoch 21, batch 1700, train_loss[loss=3.256, NarTop10Accuracy=0.6792, over 6330.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6988, over 5902.02 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (2/8) Epoch 21, batch 1800, train_loss[loss=2.949, NarTop10Accuracy=0.7386, over 7038.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7001, over 5976.75 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (2/8) Epoch 21, batch 1900, train_loss[loss=3.634, NarTop10Accuracy=0.5926, over 6591.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6972, over 6035.97 frames. ], batch size: 50, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (2/8) Epoch 21, batch 2000, train_loss[loss=3.493, NarTop10Accuracy=0.6209, over 6198.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6979, over 6003.31 frames. ], batch size: 51, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (2/8) Epoch 21, batch 2100, train_loss[loss=2.748, NarTop10Accuracy=0.78, over 3927.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6962, over 5964.35 frames. ], batch size: 4, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (2/8) Epoch 21, batch 2200, train_loss[loss=2.919, NarTop10Accuracy=0.7422, over 7263.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6955, over 6022.45 frames. ], batch size: 31, lr: 4.04e-03 +2024-08-06 19:09:53,223 INFO [trainer.py:765] (2/8) Epoch 21, batch 2300, train_loss[loss=3.042, NarTop10Accuracy=0.718, over 5709.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6927, over 6026.20 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,597 INFO [trainer.py:765] (2/8) Epoch 21, batch 2400, train_loss[loss=3.286, NarTop10Accuracy=0.6629, over 5250.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6975, over 5786.39 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,229 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (2/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,275 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (2/8) Epoch 21, batch 2500, train_loss[loss=3.283, NarTop10Accuracy=0.6526, over 5208.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7046, over 5481.23 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:09,181 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 19:12:09,054 INFO [trainer.py:765] (2/8) Epoch 22, batch 100, train_loss[loss=2.909, NarTop10Accuracy=0.7449, over 7077.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7074, over 2360.75 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (2/8) Epoch 22, batch 200, train_loss[loss=3.259, NarTop10Accuracy=0.6667, over 6537.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7068, over 3859.11 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (2/8) Epoch 22, batch 300, train_loss[loss=2.902, NarTop10Accuracy=0.7461, over 7203.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7066, over 4652.54 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (2/8) Epoch 22, batch 400, train_loss[loss=3.031, NarTop10Accuracy=0.7243, over 5046.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7085, over 5102.95 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (2/8) Epoch 22, batch 500, train_loss[loss=3.243, NarTop10Accuracy=0.6741, over 5982.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7083, over 5368.18 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (2/8) Epoch 22, batch 600, train_loss[loss=2.96, NarTop10Accuracy=0.7349, over 5715.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7012, over 5645.64 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (2/8) Epoch 22, batch 700, train_loss[loss=3.246, NarTop10Accuracy=0.6698, over 4380.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7019, over 5704.81 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:10,665 INFO [trainer.py:765] (2/8) Epoch 22, batch 800, train_loss[loss=3.003, NarTop10Accuracy=0.7266, over 5007.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.703, over 5759.79 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (2/8) Epoch 22, batch 900, train_loss[loss=2.876, NarTop10Accuracy=0.7521, over 6570.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7028, over 5798.36 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (2/8) Epoch 22, batch 1000, train_loss[loss=3.09, NarTop10Accuracy=0.7111, over 6726.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7041, over 5904.17 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:52,086 INFO [trainer.py:765] (2/8) Epoch 22, batch 1100, train_loss[loss=2.961, NarTop10Accuracy=0.741, over 6915.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7024, over 5929.23 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,927 INFO [trainer.py:765] (2/8) Epoch 22, batch 1200, train_loss[loss=2.945, NarTop10Accuracy=0.7273, over 7218.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7062, over 5922.75 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (2/8) Epoch 22, batch 1300, train_loss[loss=3.037, NarTop10Accuracy=0.7238, over 5097.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7071, over 5990.48 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (2/8) Epoch 22, batch 1400, train_loss[loss=2.808, NarTop10Accuracy=0.7642, over 6078.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7049, over 5992.14 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (2/8) Epoch 22, batch 1500, train_loss[loss=3.442, NarTop10Accuracy=0.6368, over 6279.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7043, over 5936.06 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,647 INFO [trainer.py:765] (2/8) Epoch 22, batch 1600, train_loss[loss=3.112, NarTop10Accuracy=0.7012, over 7053.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7004, over 5908.11 frames. 
], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (2/8) Epoch 22, batch 1700, train_loss[loss=3.155, NarTop10Accuracy=0.7009, over 6096.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7006, over 5905.12 frames. ], batch size: 13, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (2/8) Epoch 22, batch 1800, train_loss[loss=2.823, NarTop10Accuracy=0.7671, over 7059.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.701, over 5968.99 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (2/8) Epoch 22, batch 1900, train_loss[loss=3.043, NarTop10Accuracy=0.7205, over 6414.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6975, over 6018.95 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,110 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (2/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,819 INFO [trainer.py:765] (2/8) Epoch 22, batch 2000, train_loss[loss=3.513, NarTop10Accuracy=0.6239, over 6204.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7029, over 5993.41 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (2/8) Epoch 22, batch 2100, train_loss[loss=3.076, NarTop10Accuracy=0.7137, over 3810.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7038, over 5962.31 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 19:23:15,230 INFO [trainer.py:765] (2/8) Epoch 22, batch 2200, train_loss[loss=3.006, NarTop10Accuracy=0.7219, over 7116.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7034, over 5996.16 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,315 INFO [trainer.py:765] (2/8) Epoch 22, batch 2300, train_loss[loss=2.909, NarTop10Accuracy=0.7317, over 5802.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6994, over 6013.72 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (2/8) Epoch 22, batch 2400, train_loss[loss=3.147, NarTop10Accuracy=0.6953, over 5085.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7013, over 5778.88 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (2/8) Epoch 22, batch 2500, train_loss[loss=3.247, NarTop10Accuracy=0.6818, over 5211.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7046, over 5484.28 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,489 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (2/8) Epoch 23, batch 100, train_loss[loss=3.043, NarTop10Accuracy=0.715, over 7104.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7031, over 2362.25 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (2/8) Epoch 23, batch 200, train_loss[loss=3.506, NarTop10Accuracy=0.6192, over 6864.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7018, over 3860.31 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (2/8) Epoch 23, batch 300, train_loss[loss=3, NarTop10Accuracy=0.7286, over 7032.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7065, over 4669.10 frames. 
], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,540 INFO [trainer.py:765] (2/8) Epoch 23, batch 400, train_loss[loss=3.353, NarTop10Accuracy=0.6528, over 5766.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7045, over 5129.71 frames. ], batch size: 8, lr: 3.75e-03 +2024-08-06 19:27:59,713 INFO [trainer.py:765] (2/8) Epoch 23, batch 500, train_loss[loss=3.285, NarTop10Accuracy=0.6627, over 6123.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7042, over 5392.11 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (2/8) Epoch 23, batch 600, train_loss[loss=3.273, NarTop10Accuracy=0.6828, over 5631.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7062, over 5648.54 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (2/8) Epoch 23, batch 700, train_loss[loss=3.103, NarTop10Accuracy=0.7038, over 4386.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 5715.14 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (2/8) Epoch 23, batch 800, train_loss[loss=2.823, NarTop10Accuracy=0.7626, over 4212.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7053, over 5782.13 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (2/8) Epoch 23, batch 900, train_loss[loss=3.333, NarTop10Accuracy=0.6571, over 6534.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7064, over 5801.20 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (2/8) Epoch 23, batch 1000, train_loss[loss=3.049, NarTop10Accuracy=0.7123, over 6171.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7072, over 5895.28 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,521 INFO [trainer.py:765] (2/8) Epoch 23, batch 1100, train_loss[loss=3.101, NarTop10Accuracy=0.717, over 6840.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7061, over 5933.33 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (2/8) Epoch 23, batch 1200, train_loss[loss=3.013, NarTop10Accuracy=0.723, over 7134.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7041, over 5921.93 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (2/8) Epoch 23, batch 1300, train_loss[loss=3.094, NarTop10Accuracy=0.7, over 5070.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 5969.65 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (2/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,407 INFO [trainer.py:765] (2/8) Epoch 23, batch 1400, train_loss[loss=2.79, NarTop10Accuracy=0.7723, over 6198.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7035, over 5987.66 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,216 INFO [trainer.py:765] (2/8) Epoch 23, batch 1500, train_loss[loss=3.237, NarTop10Accuracy=0.6851, over 5901.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7065, over 5939.66 frames. 
], batch size: 52, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (2/8) Epoch 23, batch 1600, train_loss[loss=2.957, NarTop10Accuracy=0.7356, over 6987.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7058, over 5915.55 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (2/8) Epoch 23, batch 1700, train_loss[loss=3.351, NarTop10Accuracy=0.6536, over 6549.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7012, over 5928.22 frames. ], batch size: 14, lr: 3.71e-03 +2024-08-06 19:35:19,262 INFO [trainer.py:765] (2/8) Epoch 23, batch 1800, train_loss[loss=2.988, NarTop10Accuracy=0.727, over 7038.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7024, over 5985.92 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,597 INFO [trainer.py:765] (2/8) Epoch 23, batch 1900, train_loss[loss=3.377, NarTop10Accuracy=0.6489, over 5697.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7006, over 6008.94 frames. ], batch size: 52, lr: 3.70e-03 +2024-08-06 19:36:11,171 INFO [trainer.py:765] (2/8) Epoch 23, batch 2000, train_loss[loss=3.655, NarTop10Accuracy=0.5943, over 6015.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7032, over 5994.07 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,517 INFO [trainer.py:765] (2/8) Epoch 23, batch 2100, train_loss[loss=3.451, NarTop10Accuracy=0.6478, over 4020.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7037, over 5966.80 frames. ], batch size: 4, lr: 3.69e-03 +2024-08-06 19:37:01,908 INFO [trainer.py:765] (2/8) Epoch 23, batch 2200, train_loss[loss=3.067, NarTop10Accuracy=0.7186, over 7362.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.6999, over 5986.88 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,061 INFO [trainer.py:765] (2/8) Epoch 23, batch 2300, train_loss[loss=2.959, NarTop10Accuracy=0.7408, over 5649.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7009, over 6008.06 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,424 INFO [trainer.py:765] (2/8) Epoch 23, batch 2400, train_loss[loss=2.974, NarTop10Accuracy=0.7368, over 5124.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7013, over 5773.93 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,053 INFO [trainer.py:765] (2/8) Epoch 23, batch 2500, train_loss[loss=3.321, NarTop10Accuracy=0.6561, over 5160.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7066, over 5477.30 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,062 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 19:39:37,632 INFO [trainer.py:765] (2/8) Epoch 24, batch 100, train_loss[loss=3.472, NarTop10Accuracy=0.6281, over 7380.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7007, over 2378.09 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,190 INFO [trainer.py:765] (2/8) Epoch 24, batch 200, train_loss[loss=2.796, NarTop10Accuracy=0.7725, over 6768.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7063, over 3864.48 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,556 INFO [trainer.py:765] (2/8) Epoch 24, batch 300, train_loss[loss=2.924, NarTop10Accuracy=0.7407, over 7167.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.707, over 4649.64 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,234 INFO [trainer.py:765] (2/8) Epoch 24, batch 400, train_loss[loss=2.97, NarTop10Accuracy=0.7287, over 5151.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7048, over 5109.26 frames. 
], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (2/8) Epoch 24, batch 500, train_loss[loss=2.948, NarTop10Accuracy=0.7383, over 6228.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 5387.72 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,452 INFO [trainer.py:765] (2/8) Epoch 24, batch 600, train_loss[loss=2.798, NarTop10Accuracy=0.7631, over 5745.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7078, over 5650.21 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (2/8) Epoch 24, batch 700, train_loss[loss=2.817, NarTop10Accuracy=0.7575, over 5106.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7078, over 5728.81 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,381 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (2/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:43:28,562 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,814 INFO [trainer.py:765] (2/8) Epoch 24, batch 800, train_loss[loss=2.701, NarTop10Accuracy=0.7793, over 4401.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7085, over 5785.11 frames. ], batch size: 5, lr: 3.58e-03 +2024-08-06 19:44:11,410 INFO [trainer.py:765] (2/8) Epoch 24, batch 900, train_loss[loss=2.795, NarTop10Accuracy=0.7615, over 6123.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7094, over 5783.74 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,489 INFO [trainer.py:765] (2/8) Epoch 24, batch 1000, train_loss[loss=3.265, NarTop10Accuracy=0.6739, over 6231.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7068, over 5873.93 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:45:27,107 INFO [trainer.py:765] (2/8) Epoch 24, batch 1100, train_loss[loss=3.427, NarTop10Accuracy=0.6382, over 6888.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7037, over 5908.93 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,437 INFO [trainer.py:765] (2/8) Epoch 24, batch 1200, train_loss[loss=3.016, NarTop10Accuracy=0.7259, over 7119.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7047, over 5922.07 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,294 INFO [trainer.py:765] (2/8) Epoch 24, batch 1300, train_loss[loss=3.447, NarTop10Accuracy=0.6339, over 5040.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 6001.22 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,859 INFO [trainer.py:765] (2/8) Epoch 24, batch 1400, train_loss[loss=3.304, NarTop10Accuracy=0.6647, over 6027.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.704, over 6011.75 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,956 INFO [trainer.py:765] (2/8) Epoch 24, batch 1500, train_loss[loss=3.431, NarTop10Accuracy=0.6428, over 6186.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7019, over 5943.60 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (2/8) Epoch 24, batch 1600, train_loss[loss=3.563, NarTop10Accuracy=0.6098, over 7068.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7011, over 5914.27 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,267 INFO [trainer.py:765] (2/8) Epoch 24, batch 1700, train_loss[loss=2.905, NarTop10Accuracy=0.7559, over 6333.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7019, over 5912.78 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 19:49:01,637 INFO [trainer.py:765] (2/8) Epoch 24, batch 1800, train_loss[loss=2.881, NarTop10Accuracy=0.7565, over 7437.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7007, over 5983.34 frames. ], batch size: 23, lr: 3.55e-03 +2024-08-06 19:49:28,042 INFO [trainer.py:765] (2/8) Epoch 24, batch 1900, train_loss[loss=3.587, NarTop10Accuracy=0.6095, over 6336.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6982, over 6022.19 frames. ], batch size: 52, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (2/8) Epoch 24, batch 2000, train_loss[loss=3.573, NarTop10Accuracy=0.6128, over 6180.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7028, over 5993.01 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 19:50:18,819 INFO [trainer.py:765] (2/8) Epoch 24, batch 2100, train_loss[loss=2.831, NarTop10Accuracy=0.7572, over 4038.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.704, over 5974.95 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (2/8) Epoch 24, batch 2200, train_loss[loss=3.465, NarTop10Accuracy=0.632, over 7461.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7037, over 6013.93 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (2/8) Epoch 24, batch 2300, train_loss[loss=3.004, NarTop10Accuracy=0.7242, over 5724.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 6020.73 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,348 INFO [trainer.py:765] (2/8) Epoch 24, batch 2400, train_loss[loss=2.95, NarTop10Accuracy=0.7409, over 5775.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7062, over 5772.35 frames. ], batch size: 8, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (2/8) Epoch 24, batch 2500, train_loss[loss=2.835, NarTop10Accuracy=0.761, over 5211.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7095, over 5474.06 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:16,971 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 19:53:22,198 INFO [trainer.py:765] (2/8) Epoch 25, batch 100, train_loss[loss=3.341, NarTop10Accuracy=0.659, over 7059.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7095, over 2362.24 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,262 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (2/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,176 INFO [trainer.py:765] (2/8) Epoch 25, batch 200, train_loss[loss=2.899, NarTop10Accuracy=0.7497, over 6789.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7078, over 3858.05 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,646 INFO [trainer.py:765] (2/8) Epoch 25, batch 300, train_loss[loss=3.16, NarTop10Accuracy=0.6911, over 7020.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7094, over 4645.76 frames. 
], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (2/8) Epoch 25, batch 400, train_loss[loss=2.973, NarTop10Accuracy=0.7328, over 5025.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7099, over 5104.35 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,737 INFO [trainer.py:765] (2/8) Epoch 25, batch 500, train_loss[loss=2.773, NarTop10Accuracy=0.7754, over 6081.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7104, over 5373.57 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,814 INFO [trainer.py:765] (2/8) Epoch 25, batch 600, train_loss[loss=2.927, NarTop10Accuracy=0.7359, over 5793.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 5641.69 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (2/8) Epoch 25, batch 700, train_loss[loss=2.735, NarTop10Accuracy=0.7781, over 5010.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.711, over 5705.78 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,136 INFO [trainer.py:765] (2/8) Epoch 25, batch 800, train_loss[loss=2.896, NarTop10Accuracy=0.7499, over 5109.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7095, over 5776.74 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,678 INFO [trainer.py:765] (2/8) Epoch 25, batch 900, train_loss[loss=3.084, NarTop10Accuracy=0.705, over 6423.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 5788.93 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:58:37,640 INFO [trainer.py:765] (2/8) Epoch 25, batch 1000, train_loss[loss=2.759, NarTop10Accuracy=0.7687, over 6213.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7066, over 5885.17 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:59:14,854 INFO [trainer.py:765] (2/8) Epoch 25, batch 1100, train_loss[loss=3.445, NarTop10Accuracy=0.6332, over 6843.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7053, over 5915.90 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,039 INFO [trainer.py:765] (2/8) Epoch 25, batch 1200, train_loss[loss=3.344, NarTop10Accuracy=0.6487, over 7296.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7059, over 5921.75 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (2/8) Epoch 25, batch 1300, train_loss[loss=2.994, NarTop10Accuracy=0.7323, over 4989.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7081, over 5977.84 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,015 INFO [trainer.py:765] (2/8) Epoch 25, batch 1400, train_loss[loss=2.828, NarTop10Accuracy=0.7641, over 6078.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 6018.23 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,822 INFO [trainer.py:765] (2/8) Epoch 25, batch 1500, train_loss[loss=3.218, NarTop10Accuracy=0.6875, over 6738.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7075, over 5959.17 frames. ], batch size: 52, lr: 3.41e-03 +2024-08-06 20:02:00,624 INFO [trainer.py:765] (2/8) Epoch 25, batch 1600, train_loss[loss=2.846, NarTop10Accuracy=0.7496, over 6882.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7078, over 5934.43 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,359 INFO [trainer.py:765] (2/8) Epoch 25, batch 1700, train_loss[loss=3.027, NarTop10Accuracy=0.7217, over 6354.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7078, over 5915.27 frames. 
], batch size: 13, lr: 3.41e-03 +2024-08-06 20:02:53,853 INFO [trainer.py:765] (2/8) Epoch 25, batch 1800, train_loss[loss=3.315, NarTop10Accuracy=0.6568, over 7236.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7041, over 5977.16 frames. ], batch size: 23, lr: 3.40e-03 +2024-08-06 20:03:20,340 INFO [trainer.py:765] (2/8) Epoch 25, batch 1900, train_loss[loss=3.182, NarTop10Accuracy=0.6892, over 5643.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.703, over 6028.29 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,933 INFO [trainer.py:765] (2/8) Epoch 25, batch 2000, train_loss[loss=3.486, NarTop10Accuracy=0.6274, over 6444.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7011, over 5995.00 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:04:11,245 INFO [trainer.py:765] (2/8) Epoch 25, batch 2100, train_loss[loss=2.776, NarTop10Accuracy=0.7657, over 3900.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7037, over 5950.01 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,409 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:04:39,344 INFO [trainer.py:811] (2/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,512 INFO [trainer.py:765] (2/8) Epoch 25, batch 2200, train_loss[loss=3.302, NarTop10Accuracy=0.6617, over 7311.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7024, over 6010.14 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (2/8) Epoch 25, batch 2300, train_loss[loss=2.883, NarTop10Accuracy=0.7437, over 5640.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7022, over 6017.61 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (2/8) Epoch 25, batch 2400, train_loss[loss=2.849, NarTop10Accuracy=0.7546, over 5145.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.705, over 5777.04 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,846 INFO [trainer.py:765] (2/8) Epoch 25, batch 2500, train_loss[loss=2.892, NarTop10Accuracy=0.7555, over 5061.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7116, over 5476.09 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:18,175 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 20:07:19,304 INFO [trainer.py:765] (2/8) Epoch 26, batch 100, train_loss[loss=3.013, NarTop10Accuracy=0.7144, over 7371.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7074, over 2357.20 frames. ], batch size: 31, lr: 3.32e-03 +2024-08-06 20:07:52,381 INFO [trainer.py:765] (2/8) Epoch 26, batch 200, train_loss[loss=2.78, NarTop10Accuracy=0.7681, over 6687.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7061, over 3859.29 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,733 INFO [trainer.py:765] (2/8) Epoch 26, batch 300, train_loss[loss=3.009, NarTop10Accuracy=0.7251, over 6930.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 4647.90 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (2/8) Epoch 26, batch 400, train_loss[loss=3.008, NarTop10Accuracy=0.7076, over 5214.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5095.07 frames. 
], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,147 INFO [trainer.py:765] (2/8) Epoch 26, batch 500, train_loss[loss=2.824, NarTop10Accuracy=0.765, over 6084.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 5374.36 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,890 INFO [trainer.py:765] (2/8) Epoch 26, batch 600, train_loss[loss=2.734, NarTop10Accuracy=0.7818, over 5718.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 5645.44 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,872 INFO [trainer.py:765] (2/8) Epoch 26, batch 700, train_loss[loss=3.129, NarTop10Accuracy=0.6946, over 5181.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7075, over 5705.34 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:19,060 INFO [trainer.py:765] (2/8) Epoch 26, batch 800, train_loss[loss=3, NarTop10Accuracy=0.7278, over 5052.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7082, over 5770.41 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,315 INFO [trainer.py:765] (2/8) Epoch 26, batch 900, train_loss[loss=2.845, NarTop10Accuracy=0.7552, over 6042.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 5800.97 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:12:25,972 INFO [trainer.py:765] (2/8) Epoch 26, batch 1000, train_loss[loss=2.906, NarTop10Accuracy=0.7452, over 6183.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7062, over 5902.74 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:13:06,376 INFO [trainer.py:765] (2/8) Epoch 26, batch 1100, train_loss[loss=3.316, NarTop10Accuracy=0.6612, over 6723.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7047, over 5929.56 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,535 INFO [trainer.py:765] (2/8) Epoch 26, batch 1200, train_loss[loss=3.329, NarTop10Accuracy=0.6581, over 7314.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 5932.49 frames. ], batch size: 31, lr: 3.29e-03 +2024-08-06 20:14:13,695 INFO [trainer.py:765] (2/8) Epoch 26, batch 1300, train_loss[loss=2.829, NarTop10Accuracy=0.768, over 5118.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 6006.19 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (2/8) Epoch 26, batch 1400, train_loss[loss=2.921, NarTop10Accuracy=0.7479, over 6072.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 6019.38 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,155 INFO [trainer.py:765] (2/8) Epoch 26, batch 1500, train_loss[loss=3.153, NarTop10Accuracy=0.6994, over 6555.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7095, over 5952.76 frames. ], batch size: 51, lr: 3.28e-03 +2024-08-06 20:15:48,979 INFO [trainer.py:765] (2/8) Epoch 26, batch 1600, train_loss[loss=3.002, NarTop10Accuracy=0.7176, over 6939.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 5925.06 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,001 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:15:58,238 INFO [trainer.py:811] (2/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,239 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:15:58,778 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,951 INFO [trainer.py:765] (2/8) Epoch 26, batch 1700, train_loss[loss=3.137, NarTop10Accuracy=0.6919, over 6621.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 5929.30 frames. ], batch size: 14, lr: 3.28e-03 +2024-08-06 20:16:50,425 INFO [trainer.py:765] (2/8) Epoch 26, batch 1800, train_loss[loss=2.715, NarTop10Accuracy=0.7849, over 7074.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7117, over 5986.66 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,839 INFO [trainer.py:765] (2/8) Epoch 26, batch 1900, train_loss[loss=3.105, NarTop10Accuracy=0.7105, over 6069.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 6038.52 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,378 INFO [trainer.py:765] (2/8) Epoch 26, batch 2000, train_loss[loss=3.561, NarTop10Accuracy=0.6162, over 5913.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7066, over 5998.75 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,562 INFO [trainer.py:765] (2/8) Epoch 26, batch 2100, train_loss[loss=3.091, NarTop10Accuracy=0.7035, over 4815.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7049, over 5972.30 frames. ], batch size: 5, lr: 3.27e-03 +2024-08-06 20:18:32,775 INFO [trainer.py:765] (2/8) Epoch 26, batch 2200, train_loss[loss=2.974, NarTop10Accuracy=0.7377, over 7023.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7066, over 5996.89 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,896 INFO [trainer.py:765] (2/8) Epoch 26, batch 2300, train_loss[loss=3.238, NarTop10Accuracy=0.6576, over 5664.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.705, over 6006.45 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,204 INFO [trainer.py:765] (2/8) Epoch 26, batch 2400, train_loss[loss=2.895, NarTop10Accuracy=0.7515, over 5172.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 5766.28 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,650 INFO [trainer.py:765] (2/8) Epoch 26, batch 2500, train_loss[loss=2.7, NarTop10Accuracy=0.7895, over 5115.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 5481.51 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:05,853 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 20:21:04,874 INFO [trainer.py:765] (2/8) Epoch 27, batch 100, train_loss[loss=3.182, NarTop10Accuracy=0.6939, over 7332.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 2367.74 frames. ], batch size: 32, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (2/8) Epoch 27, batch 200, train_loss[loss=2.79, NarTop10Accuracy=0.7733, over 7005.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 3854.98 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,050 INFO [trainer.py:765] (2/8) Epoch 27, batch 300, train_loss[loss=2.887, NarTop10Accuracy=0.7494, over 6993.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 4657.25 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (2/8) Epoch 27, batch 400, train_loss[loss=2.789, NarTop10Accuracy=0.7663, over 5190.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7129, over 5102.67 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (2/8) Epoch 27, batch 500, train_loss[loss=2.776, NarTop10Accuracy=0.7641, over 6066.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5393.35 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (2/8) Epoch 27, batch 600, train_loss[loss=3.297, NarTop10Accuracy=0.6697, over 5679.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7143, over 5666.78 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,975 INFO [trainer.py:765] (2/8) Epoch 27, batch 700, train_loss[loss=2.873, NarTop10Accuracy=0.7528, over 5136.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 5727.46 frames. ], batch size: 6, lr: 3.18e-03 +2024-08-06 20:25:03,408 INFO [trainer.py:765] (2/8) Epoch 27, batch 800, train_loss[loss=2.973, NarTop10Accuracy=0.7236, over 5217.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5785.35 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (2/8) Epoch 27, batch 900, train_loss[loss=3.199, NarTop10Accuracy=0.6937, over 6552.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7111, over 5808.56 frames. ], batch size: 14, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (2/8) Epoch 27, batch 1000, train_loss[loss=2.86, NarTop10Accuracy=0.7593, over 6240.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7103, over 5897.39 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,315 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (2/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (2/8) Epoch 27, batch 1100, train_loss[loss=2.957, NarTop10Accuracy=0.7346, over 6912.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.71, over 5941.72 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (2/8) Epoch 27, batch 1200, train_loss[loss=2.865, NarTop10Accuracy=0.7491, over 7260.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5935.14 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (2/8) Epoch 27, batch 1300, train_loss[loss=2.856, NarTop10Accuracy=0.7573, over 5058.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7124, over 5988.34 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (2/8) Epoch 27, batch 1400, train_loss[loss=3.25, NarTop10Accuracy=0.6675, over 5979.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7076, over 5992.76 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (2/8) Epoch 27, batch 1500, train_loss[loss=3.006, NarTop10Accuracy=0.7277, over 5835.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7087, over 5930.07 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (2/8) Epoch 27, batch 1600, train_loss[loss=2.742, NarTop10Accuracy=0.7785, over 7176.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.707, over 5909.78 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (2/8) Epoch 27, batch 1700, train_loss[loss=3.278, NarTop10Accuracy=0.6712, over 6651.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7095, over 5905.27 frames. ], batch size: 14, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (2/8) Epoch 27, batch 1800, train_loss[loss=3.403, NarTop10Accuracy=0.6434, over 6960.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7081, over 5971.29 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (2/8) Epoch 27, batch 1900, train_loss[loss=3.101, NarTop10Accuracy=0.7005, over 6303.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7076, over 6015.16 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (2/8) Epoch 27, batch 2000, train_loss[loss=3.121, NarTop10Accuracy=0.7007, over 6477.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7105, over 5993.89 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,659 INFO [trainer.py:765] (2/8) Epoch 27, batch 2100, train_loss[loss=2.58, NarTop10Accuracy=0.805, over 4704.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 5991.96 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (2/8) Epoch 27, batch 2200, train_loss[loss=3.323, NarTop10Accuracy=0.6547, over 7167.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 6018.48 frames. ], batch size: 32, lr: 3.14e-03 +2024-08-06 20:32:32,942 INFO [trainer.py:765] (2/8) Epoch 27, batch 2300, train_loss[loss=2.866, NarTop10Accuracy=0.7534, over 5841.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7083, over 6025.78 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (2/8) Epoch 27, batch 2400, train_loss[loss=2.735, NarTop10Accuracy=0.7798, over 5031.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7067, over 5783.51 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (2/8) Epoch 27, batch 2500, train_loss[loss=3.347, NarTop10Accuracy=0.6446, over 5103.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7119, over 5473.59 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,891 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 20:34:35,831 INFO [trainer.py:765] (2/8) Epoch 28, batch 100, train_loss[loss=3.035, NarTop10Accuracy=0.7259, over 7575.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7075, over 2370.80 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (2/8) Epoch 28, batch 200, train_loss[loss=2.694, NarTop10Accuracy=0.7811, over 6918.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 3860.88 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,422 INFO [trainer.py:765] (2/8) Epoch 28, batch 300, train_loss[loss=3.085, NarTop10Accuracy=0.7137, over 7245.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 4654.47 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,865 INFO [trainer.py:765] (2/8) Epoch 28, batch 400, train_loss[loss=3.184, NarTop10Accuracy=0.6805, over 5193.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7078, over 5089.01 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,406 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (2/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:36:41,103 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,664 INFO [trainer.py:765] (2/8) Epoch 28, batch 500, train_loss[loss=3.121, NarTop10Accuracy=0.7012, over 6033.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 5370.24 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,463 INFO [trainer.py:765] (2/8) Epoch 28, batch 600, train_loss[loss=2.916, NarTop10Accuracy=0.7479, over 5679.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7097, over 5644.92 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,892 INFO [trainer.py:765] (2/8) Epoch 28, batch 700, train_loss[loss=3.166, NarTop10Accuracy=0.6837, over 4941.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7071, over 5734.23 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,490 INFO [trainer.py:765] (2/8) Epoch 28, batch 800, train_loss[loss=3.024, NarTop10Accuracy=0.7227, over 5100.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7125, over 5782.48 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,507 INFO [trainer.py:765] (2/8) Epoch 28, batch 900, train_loss[loss=3.209, NarTop10Accuracy=0.6846, over 6156.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7138, over 5804.26 frames. ], batch size: 13, lr: 3.06e-03 +2024-08-06 20:39:53,240 INFO [trainer.py:765] (2/8) Epoch 28, batch 1000, train_loss[loss=3.156, NarTop10Accuracy=0.6816, over 6489.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7132, over 5909.64 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 20:40:25,868 INFO [trainer.py:765] (2/8) Epoch 28, batch 1100, train_loss[loss=2.845, NarTop10Accuracy=0.761, over 6855.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 5931.61 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,419 INFO [trainer.py:765] (2/8) Epoch 28, batch 1200, train_loss[loss=3.295, NarTop10Accuracy=0.6663, over 7347.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.708, over 5917.51 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,681 INFO [trainer.py:765] (2/8) Epoch 28, batch 1300, train_loss[loss=3.247, NarTop10Accuracy=0.6537, over 4323.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7087, over 5984.69 frames. ], batch size: 5, lr: 3.05e-03 +2024-08-06 20:42:13,047 INFO [trainer.py:765] (2/8) Epoch 28, batch 1400, train_loss[loss=2.877, NarTop10Accuracy=0.751, over 6138.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7078, over 6003.42 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (2/8) Epoch 28, batch 1500, train_loss[loss=3.419, NarTop10Accuracy=0.6368, over 5742.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7098, over 5953.63 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,081 INFO [trainer.py:765] (2/8) Epoch 28, batch 1600, train_loss[loss=2.841, NarTop10Accuracy=0.7585, over 7071.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.71, over 5944.73 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:43:37,786 INFO [trainer.py:765] (2/8) Epoch 28, batch 1700, train_loss[loss=2.895, NarTop10Accuracy=0.7427, over 6411.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.709, over 5925.99 frames. 
], batch size: 13, lr: 3.04e-03 +2024-08-06 20:44:04,326 INFO [trainer.py:765] (2/8) Epoch 28, batch 1800, train_loss[loss=3.177, NarTop10Accuracy=0.7009, over 7017.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.71, over 5985.51 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,758 INFO [trainer.py:765] (2/8) Epoch 28, batch 1900, train_loss[loss=3.123, NarTop10Accuracy=0.7006, over 5874.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7114, over 6023.49 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,328 INFO [trainer.py:765] (2/8) Epoch 28, batch 2000, train_loss[loss=2.969, NarTop10Accuracy=0.7321, over 6558.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7134, over 5994.61 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 20:45:21,651 INFO [trainer.py:765] (2/8) Epoch 28, batch 2100, train_loss[loss=3.043, NarTop10Accuracy=0.7176, over 3984.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7141, over 5964.81 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 20:45:47,076 INFO [trainer.py:765] (2/8) Epoch 28, batch 2200, train_loss[loss=2.913, NarTop10Accuracy=0.7367, over 7263.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7122, over 6013.79 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,308 INFO [trainer.py:765] (2/8) Epoch 28, batch 2300, train_loss[loss=3.505, NarTop10Accuracy=0.6284, over 5649.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 6012.31 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,807 INFO [trainer.py:765] (2/8) Epoch 28, batch 2400, train_loss[loss=2.923, NarTop10Accuracy=0.7473, over 5259.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7083, over 5771.78 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (2/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (2/8) Epoch 28, batch 2500, train_loss[loss=2.986, NarTop10Accuracy=0.7258, over 5226.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7119, over 5468.90 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,325 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 20:48:21,053 INFO [trainer.py:765] (2/8) Epoch 29, batch 100, train_loss[loss=2.971, NarTop10Accuracy=0.7296, over 7176.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7105, over 2358.94 frames. ], batch size: 32, lr: 2.96e-03 +2024-08-06 20:48:53,406 INFO [trainer.py:765] (2/8) Epoch 29, batch 200, train_loss[loss=3.313, NarTop10Accuracy=0.6665, over 6771.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 3847.78 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,477 INFO [trainer.py:765] (2/8) Epoch 29, batch 300, train_loss[loss=3.268, NarTop10Accuracy=0.6773, over 7155.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 4641.86 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,053 INFO [trainer.py:765] (2/8) Epoch 29, batch 400, train_loss[loss=3.365, NarTop10Accuracy=0.6433, over 5088.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7126, over 5087.44 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,436 INFO [trainer.py:765] (2/8) Epoch 29, batch 500, train_loss[loss=3.207, NarTop10Accuracy=0.6757, over 6051.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.715, over 5380.87 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,025 INFO [trainer.py:765] (2/8) Epoch 29, batch 600, train_loss[loss=2.775, NarTop10Accuracy=0.7678, over 5691.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7157, over 5638.66 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,678 INFO [trainer.py:765] (2/8) Epoch 29, batch 700, train_loss[loss=2.96, NarTop10Accuracy=0.7393, over 5094.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7109, over 5703.00 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,725 INFO [trainer.py:765] (2/8) Epoch 29, batch 800, train_loss[loss=2.753, NarTop10Accuracy=0.7854, over 4983.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5776.24 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,743 INFO [trainer.py:765] (2/8) Epoch 29, batch 900, train_loss[loss=2.759, NarTop10Accuracy=0.7714, over 6594.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5809.15 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:16,862 INFO [trainer.py:765] (2/8) Epoch 29, batch 1000, train_loss[loss=3.405, NarTop10Accuracy=0.6413, over 6273.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 5904.56 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:52,903 INFO [trainer.py:765] (2/8) Epoch 29, batch 1100, train_loss[loss=3.18, NarTop10Accuracy=0.6912, over 6942.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.707, over 5928.07 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,691 INFO [trainer.py:765] (2/8) Epoch 29, batch 1200, train_loss[loss=3.096, NarTop10Accuracy=0.7107, over 7326.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5928.02 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,428 INFO [trainer.py:765] (2/8) Epoch 29, batch 1300, train_loss[loss=2.912, NarTop10Accuracy=0.7464, over 5013.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7109, over 5980.62 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (2/8) Epoch 29, batch 1400, train_loss[loss=3.303, NarTop10Accuracy=0.6642, over 6120.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7103, over 6004.76 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,360 INFO [trainer.py:765] (2/8) Epoch 29, batch 1500, train_loss[loss=3.374, NarTop10Accuracy=0.6494, over 5856.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7097, over 5926.46 frames. ], batch size: 50, lr: 2.94e-03 +2024-08-06 20:56:32,041 INFO [trainer.py:765] (2/8) Epoch 29, batch 1600, train_loss[loss=3.196, NarTop10Accuracy=0.6824, over 7023.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 5913.37 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,640 INFO [trainer.py:765] (2/8) Epoch 29, batch 1700, train_loss[loss=2.749, NarTop10Accuracy=0.7807, over 6159.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7098, over 5906.91 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 20:57:25,001 INFO [trainer.py:765] (2/8) Epoch 29, batch 1800, train_loss[loss=3.035, NarTop10Accuracy=0.7222, over 6951.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 5976.33 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,622 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (2/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (2/8) Epoch 29, batch 1900, train_loss[loss=2.978, NarTop10Accuracy=0.7415, over 6522.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7079, over 6036.25 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:25,308 INFO [trainer.py:765] (2/8) Epoch 29, batch 2000, train_loss[loss=3.393, NarTop10Accuracy=0.6486, over 6267.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 6005.82 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:50,629 INFO [trainer.py:765] (2/8) Epoch 29, batch 2100, train_loss[loss=3.103, NarTop10Accuracy=0.705, over 4929.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 5986.02 frames. ], batch size: 5, lr: 2.92e-03 +2024-08-06 20:59:15,805 INFO [trainer.py:765] (2/8) Epoch 29, batch 2200, train_loss[loss=2.837, NarTop10Accuracy=0.7673, over 7164.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7104, over 6017.31 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (2/8) Epoch 29, batch 2300, train_loss[loss=2.887, NarTop10Accuracy=0.7446, over 5613.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7075, over 6028.25 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,155 INFO [trainer.py:765] (2/8) Epoch 29, batch 2400, train_loss[loss=2.692, NarTop10Accuracy=0.7863, over 5193.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7109, over 5791.27 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (2/8) Epoch 29, batch 2500, train_loss[loss=3.392, NarTop10Accuracy=0.6485, over 5658.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7148, over 5492.73 frames. ], batch size: 8, lr: 2.92e-03 +2024-08-06 21:00:48,651 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 21:01:41,716 INFO [trainer.py:765] (2/8) Epoch 30, batch 100, train_loss[loss=2.823, NarTop10Accuracy=0.7625, over 7206.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7191, over 2366.56 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,013 INFO [trainer.py:765] (2/8) Epoch 30, batch 200, train_loss[loss=2.953, NarTop10Accuracy=0.7324, over 6795.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7213, over 3842.56 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,342 INFO [trainer.py:765] (2/8) Epoch 30, batch 300, train_loss[loss=2.893, NarTop10Accuracy=0.7491, over 7086.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.723, over 4656.62 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,642 INFO [trainer.py:765] (2/8) Epoch 30, batch 400, train_loss[loss=2.758, NarTop10Accuracy=0.7775, over 5091.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7192, over 5106.64 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,545 INFO [trainer.py:765] (2/8) Epoch 30, batch 500, train_loss[loss=3.36, NarTop10Accuracy=0.6517, over 6015.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7168, over 5381.33 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,655 INFO [trainer.py:765] (2/8) Epoch 30, batch 600, train_loss[loss=2.962, NarTop10Accuracy=0.7399, over 5589.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7161, over 5644.54 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,525 INFO [trainer.py:765] (2/8) Epoch 30, batch 700, train_loss[loss=2.745, NarTop10Accuracy=0.7632, over 4965.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.719, over 5708.21 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,131 INFO [trainer.py:765] (2/8) Epoch 30, batch 800, train_loss[loss=2.89, NarTop10Accuracy=0.7459, over 5010.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.718, over 5754.53 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,843 INFO [trainer.py:765] (2/8) Epoch 30, batch 900, train_loss[loss=2.999, NarTop10Accuracy=0.7402, over 6237.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7184, over 5777.92 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,951 INFO [trainer.py:765] (2/8) Epoch 30, batch 1000, train_loss[loss=2.911, NarTop10Accuracy=0.749, over 6150.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7117, over 5893.33 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:07:25,936 INFO [trainer.py:765] (2/8) Epoch 30, batch 1100, train_loss[loss=3.354, NarTop10Accuracy=0.6515, over 6840.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 5946.16 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,380 INFO [trainer.py:765] (2/8) Epoch 30, batch 1200, train_loss[loss=3.029, NarTop10Accuracy=0.7159, over 7152.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5954.78 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,370 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (2/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,457 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (2/8) Epoch 30, batch 1300, train_loss[loss=3.09, NarTop10Accuracy=0.7136, over 4329.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7112, over 5995.94 frames. ], batch size: 5, lr: 2.84e-03 +2024-08-06 21:09:22,396 INFO [trainer.py:765] (2/8) Epoch 30, batch 1400, train_loss[loss=2.838, NarTop10Accuracy=0.7616, over 6201.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7117, over 6030.04 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,372 INFO [trainer.py:765] (2/8) Epoch 30, batch 1500, train_loss[loss=3.068, NarTop10Accuracy=0.7125, over 5835.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.713, over 5960.96 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (2/8) Epoch 30, batch 1600, train_loss[loss=3.041, NarTop10Accuracy=0.7155, over 6990.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7123, over 5943.75 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,679 INFO [trainer.py:765] (2/8) Epoch 30, batch 1700, train_loss[loss=3.078, NarTop10Accuracy=0.7079, over 6642.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7103, over 5930.16 frames. 
], batch size: 14, lr: 2.83e-03 +2024-08-06 21:11:13,058 INFO [trainer.py:765] (2/8) Epoch 30, batch 1800, train_loss[loss=3.422, NarTop10Accuracy=0.6407, over 7113.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7113, over 5991.63 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,418 INFO [trainer.py:765] (2/8) Epoch 30, batch 1900, train_loss[loss=3.133, NarTop10Accuracy=0.7001, over 6045.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 6020.86 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,825 INFO [trainer.py:765] (2/8) Epoch 30, batch 2000, train_loss[loss=3.379, NarTop10Accuracy=0.647, over 6069.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7129, over 6012.48 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:30,088 INFO [trainer.py:765] (2/8) Epoch 30, batch 2100, train_loss[loss=2.919, NarTop10Accuracy=0.7436, over 3954.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7122, over 5980.76 frames. ], batch size: 4, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (2/8) Epoch 30, batch 2200, train_loss[loss=2.929, NarTop10Accuracy=0.7369, over 7344.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 6008.63 frames. ], batch size: 32, lr: 2.82e-03 +2024-08-06 21:13:20,297 INFO [trainer.py:765] (2/8) Epoch 30, batch 2300, train_loss[loss=2.779, NarTop10Accuracy=0.778, over 5700.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7092, over 6034.53 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,490 INFO [trainer.py:765] (2/8) Epoch 30, batch 2400, train_loss[loss=2.678, NarTop10Accuracy=0.7947, over 5097.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 5784.14 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,987 INFO [trainer.py:765] (2/8) Epoch 30, batch 2500, train_loss[loss=3.065, NarTop10Accuracy=0.7056, over 4968.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7168, over 5471.98 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,723 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (2/8) Epoch 31, batch 100, train_loss[loss=3.519, NarTop10Accuracy=0.6285, over 7344.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7135, over 2363.73 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,128 INFO [trainer.py:765] (2/8) Epoch 31, batch 200, train_loss[loss=2.93, NarTop10Accuracy=0.7446, over 6690.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7203, over 3856.35 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,217 INFO [trainer.py:765] (2/8) Epoch 31, batch 300, train_loss[loss=2.852, NarTop10Accuracy=0.754, over 7215.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7183, over 4650.54 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (2/8) Epoch 31, batch 400, train_loss[loss=3.204, NarTop10Accuracy=0.6894, over 5088.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 5099.53 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,725 INFO [trainer.py:765] (2/8) Epoch 31, batch 500, train_loss[loss=2.769, NarTop10Accuracy=0.778, over 6123.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5395.33 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,085 INFO [trainer.py:765] (2/8) Epoch 31, batch 600, train_loss[loss=2.806, NarTop10Accuracy=0.7698, over 5793.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 5661.86 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,611 INFO [trainer.py:765] (2/8) Epoch 31, batch 700, train_loss[loss=3.398, NarTop10Accuracy=0.6413, over 5172.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5734.66 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,096 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (2/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,277 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,246 INFO [trainer.py:765] (2/8) Epoch 31, batch 800, train_loss[loss=2.761, NarTop10Accuracy=0.7781, over 5037.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5797.59 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:19:56,951 INFO [trainer.py:765] (2/8) Epoch 31, batch 900, train_loss[loss=3.455, NarTop10Accuracy=0.6266, over 6537.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5810.34 frames. ], batch size: 14, lr: 2.76e-03 +2024-08-06 21:20:33,311 INFO [trainer.py:765] (2/8) Epoch 31, batch 1000, train_loss[loss=3.348, NarTop10Accuracy=0.6547, over 6312.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5924.94 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,216 INFO [trainer.py:765] (2/8) Epoch 31, batch 1100, train_loss[loss=3.112, NarTop10Accuracy=0.6981, over 6741.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5941.77 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,120 INFO [trainer.py:765] (2/8) Epoch 31, batch 1200, train_loss[loss=2.959, NarTop10Accuracy=0.7196, over 7428.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7201, over 5934.39 frames. ], batch size: 32, lr: 2.75e-03 +2024-08-06 21:22:19,742 INFO [trainer.py:765] (2/8) Epoch 31, batch 1300, train_loss[loss=2.969, NarTop10Accuracy=0.7384, over 5241.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 5997.16 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 21:22:53,535 INFO [trainer.py:765] (2/8) Epoch 31, batch 1400, train_loss[loss=2.809, NarTop10Accuracy=0.7643, over 6117.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.712, over 6025.56 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,271 INFO [trainer.py:765] (2/8) Epoch 31, batch 1500, train_loss[loss=3.331, NarTop10Accuracy=0.6654, over 6342.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5947.03 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:23:49,005 INFO [trainer.py:765] (2/8) Epoch 31, batch 1600, train_loss[loss=3.32, NarTop10Accuracy=0.6674, over 7296.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7146, over 5937.60 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,513 INFO [trainer.py:765] (2/8) Epoch 31, batch 1700, train_loss[loss=3.192, NarTop10Accuracy=0.689, over 6264.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7135, over 5917.32 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,997 INFO [trainer.py:765] (2/8) Epoch 31, batch 1800, train_loss[loss=2.811, NarTop10Accuracy=0.7692, over 7158.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7163, over 5974.79 frames. 
], batch size: 23, lr: 2.74e-03 +2024-08-06 21:25:08,358 INFO [trainer.py:765] (2/8) Epoch 31, batch 1900, train_loss[loss=3.271, NarTop10Accuracy=0.6753, over 6387.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.713, over 6020.51 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:33,774 INFO [trainer.py:765] (2/8) Epoch 31, batch 2000, train_loss[loss=3.002, NarTop10Accuracy=0.7324, over 6054.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7151, over 5978.35 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:59,107 INFO [trainer.py:765] (2/8) Epoch 31, batch 2100, train_loss[loss=2.888, NarTop10Accuracy=0.7514, over 4782.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 5954.10 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (2/8) Epoch 31, batch 2200, train_loss[loss=3.02, NarTop10Accuracy=0.7237, over 7116.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7179, over 5997.55 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (2/8) Epoch 31, batch 2300, train_loss[loss=2.791, NarTop10Accuracy=0.7701, over 5679.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.715, over 6008.69 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,608 INFO [trainer.py:765] (2/8) Epoch 31, batch 2400, train_loss[loss=2.824, NarTop10Accuracy=0.7609, over 5118.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5766.80 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,028 INFO [trainer.py:765] (2/8) Epoch 31, batch 2500, train_loss[loss=2.913, NarTop10Accuracy=0.7411, over 5268.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7183, over 5478.74 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:56,943 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 21:28:49,393 INFO [trainer.py:765] (2/8) Epoch 32, batch 100, train_loss[loss=2.84, NarTop10Accuracy=0.7609, over 7257.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7137, over 2364.63 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,161 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (2/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 21:29:16,940 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,273 INFO [trainer.py:765] (2/8) Epoch 32, batch 200, train_loss[loss=3.316, NarTop10Accuracy=0.6661, over 6816.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7128, over 3854.05 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,279 INFO [trainer.py:765] (2/8) Epoch 32, batch 300, train_loss[loss=3.119, NarTop10Accuracy=0.7026, over 6903.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7152, over 4646.00 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,104 INFO [trainer.py:765] (2/8) Epoch 32, batch 400, train_loss[loss=2.849, NarTop10Accuracy=0.7593, over 5073.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7123, over 5113.84 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,531 INFO [trainer.py:765] (2/8) Epoch 32, batch 500, train_loss[loss=3.061, NarTop10Accuracy=0.7147, over 6183.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7139, over 5402.18 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,487 INFO [trainer.py:765] (2/8) Epoch 32, batch 600, train_loss[loss=3.235, NarTop10Accuracy=0.679, over 5640.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7148, over 5669.69 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (2/8) Epoch 32, batch 700, train_loss[loss=2.679, NarTop10Accuracy=0.7871, over 5085.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7164, over 5735.72 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,647 INFO [trainer.py:765] (2/8) Epoch 32, batch 800, train_loss[loss=3.141, NarTop10Accuracy=0.6967, over 4326.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 5784.57 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:28,992 INFO [trainer.py:765] (2/8) Epoch 32, batch 900, train_loss[loss=2.826, NarTop10Accuracy=0.7581, over 6828.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5808.31 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:04,050 INFO [trainer.py:765] (2/8) Epoch 32, batch 1000, train_loss[loss=3.206, NarTop10Accuracy=0.6873, over 6816.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7173, over 5913.35 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (2/8) Epoch 32, batch 1100, train_loss[loss=3.14, NarTop10Accuracy=0.6926, over 6840.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5926.10 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (2/8) Epoch 32, batch 1200, train_loss[loss=3.223, NarTop10Accuracy=0.6718, over 7071.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5917.94 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 21:35:52,801 INFO [trainer.py:765] (2/8) Epoch 32, batch 1300, train_loss[loss=3.102, NarTop10Accuracy=0.7032, over 5154.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7141, over 5991.44 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,479 INFO [trainer.py:765] (2/8) Epoch 32, batch 1400, train_loss[loss=3.256, NarTop10Accuracy=0.6814, over 6144.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7138, over 6009.85 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,734 INFO [trainer.py:765] (2/8) Epoch 32, batch 1500, train_loss[loss=3.422, NarTop10Accuracy=0.6399, over 5925.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7137, over 5934.07 frames. ], batch size: 51, lr: 2.66e-03 +2024-08-06 21:37:32,522 INFO [trainer.py:765] (2/8) Epoch 32, batch 1600, train_loss[loss=3.155, NarTop10Accuracy=0.6986, over 7083.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7143, over 5929.83 frames. ], batch size: 22, lr: 2.66e-03 +2024-08-06 21:37:59,160 INFO [trainer.py:765] (2/8) Epoch 32, batch 1700, train_loss[loss=3.024, NarTop10Accuracy=0.718, over 6303.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7149, over 5910.62 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 21:38:25,703 INFO [trainer.py:765] (2/8) Epoch 32, batch 1800, train_loss[loss=3.025, NarTop10Accuracy=0.7188, over 6987.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5979.91 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,169 INFO [trainer.py:765] (2/8) Epoch 32, batch 1900, train_loss[loss=3.164, NarTop10Accuracy=0.6985, over 6420.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7105, over 6039.26 frames. 
], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (2/8) Epoch 32, batch 2000, train_loss[loss=3.483, NarTop10Accuracy=0.6266, over 6072.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7134, over 6015.13 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,179 INFO [trainer.py:765] (2/8) Epoch 32, batch 2100, train_loss[loss=2.987, NarTop10Accuracy=0.7333, over 3855.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5980.65 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 21:39:54,782 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (2/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,628 INFO [trainer.py:765] (2/8) Epoch 32, batch 2200, train_loss[loss=3.155, NarTop10Accuracy=0.7006, over 7344.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7142, over 6023.69 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (2/8) Epoch 32, batch 2300, train_loss[loss=3.359, NarTop10Accuracy=0.6498, over 5646.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 6018.75 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,073 INFO [trainer.py:765] (2/8) Epoch 32, batch 2400, train_loss[loss=3.227, NarTop10Accuracy=0.6676, over 5055.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7151, over 5762.10 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,538 INFO [trainer.py:765] (2/8) Epoch 32, batch 2500, train_loss[loss=2.687, NarTop10Accuracy=0.7868, over 5124.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7224, over 5457.29 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,590 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 21:42:47,616 INFO [trainer.py:765] (2/8) Epoch 33, batch 100, train_loss[loss=3.083, NarTop10Accuracy=0.7096, over 7599.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7236, over 2360.28 frames. ], batch size: 32, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (2/8) Epoch 33, batch 200, train_loss[loss=2.689, NarTop10Accuracy=0.7838, over 6732.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.72, over 3858.56 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (2/8) Epoch 33, batch 300, train_loss[loss=3.422, NarTop10Accuracy=0.6297, over 7080.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7163, over 4662.91 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (2/8) Epoch 33, batch 400, train_loss[loss=2.744, NarTop10Accuracy=0.7798, over 5172.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.717, over 5100.40 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (2/8) Epoch 33, batch 500, train_loss[loss=2.777, NarTop10Accuracy=0.7666, over 6114.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5377.51 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (2/8) Epoch 33, batch 600, train_loss[loss=3.424, NarTop10Accuracy=0.6402, over 5724.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7159, over 5663.84 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,317 INFO [trainer.py:765] (2/8) Epoch 33, batch 700, train_loss[loss=2.709, NarTop10Accuracy=0.7894, over 5205.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 5723.23 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (2/8) Epoch 33, batch 800, train_loss[loss=2.79, NarTop10Accuracy=0.759, over 5010.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5776.53 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (2/8) Epoch 33, batch 900, train_loss[loss=3.21, NarTop10Accuracy=0.6876, over 6756.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5789.21 frames. ], batch size: 14, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (2/8) Epoch 33, batch 1000, train_loss[loss=2.931, NarTop10Accuracy=0.7307, over 6240.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7142, over 5890.66 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (2/8) Epoch 33, batch 1100, train_loss[loss=2.907, NarTop10Accuracy=0.7483, over 6897.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7108, over 5926.85 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,660 INFO [trainer.py:765] (2/8) Epoch 33, batch 1200, train_loss[loss=2.801, NarTop10Accuracy=0.7629, over 7026.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7129, over 5922.82 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,816 INFO [trainer.py:765] (2/8) Epoch 33, batch 1300, train_loss[loss=2.988, NarTop10Accuracy=0.7322, over 4995.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7134, over 5989.17 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (2/8) Epoch 33, batch 1400, train_loss[loss=3.211, NarTop10Accuracy=0.682, over 6396.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7129, over 6017.87 frames. ], batch size: 12, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (2/8) Epoch 33, batch 1500, train_loss[loss=3.048, NarTop10Accuracy=0.7148, over 6552.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7143, over 5956.77 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,607 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (2/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (2/8) Epoch 33, batch 1600, train_loss[loss=3.166, NarTop10Accuracy=0.6969, over 6987.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7169, over 5941.64 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (2/8) Epoch 33, batch 1700, train_loss[loss=2.771, NarTop10Accuracy=0.7623, over 6156.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7143, over 5937.39 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (2/8) Epoch 33, batch 1800, train_loss[loss=2.857, NarTop10Accuracy=0.7602, over 7086.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7152, over 5990.51 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,855 INFO [trainer.py:765] (2/8) Epoch 33, batch 1900, train_loss[loss=3.534, NarTop10Accuracy=0.6189, over 6396.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7117, over 6043.32 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (2/8) Epoch 33, batch 2000, train_loss[loss=3.444, NarTop10Accuracy=0.6336, over 6423.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7173, over 5996.07 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,658 INFO [trainer.py:765] (2/8) Epoch 33, batch 2100, train_loss[loss=3.493, NarTop10Accuracy=0.6223, over 4707.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7158, over 5993.98 frames. ], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,890 INFO [trainer.py:765] (2/8) Epoch 33, batch 2200, train_loss[loss=3.329, NarTop10Accuracy=0.6475, over 7272.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7146, over 6020.29 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (2/8) Epoch 33, batch 2300, train_loss[loss=2.896, NarTop10Accuracy=0.7481, over 5733.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7157, over 6033.06 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,429 INFO [trainer.py:765] (2/8) Epoch 33, batch 2400, train_loss[loss=2.836, NarTop10Accuracy=0.7597, over 5112.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5781.68 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (2/8) Epoch 33, batch 2500, train_loss[loss=2.688, NarTop10Accuracy=0.7867, over 4998.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7235, over 5475.20 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,915 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (2/8) Epoch 34, batch 100, train_loss[loss=3.382, NarTop10Accuracy=0.6476, over 7245.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 2368.84 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (2/8) Epoch 34, batch 200, train_loss[loss=3.249, NarTop10Accuracy=0.6778, over 6951.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7222, over 3863.61 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (2/8) Epoch 34, batch 300, train_loss[loss=2.757, NarTop10Accuracy=0.7689, over 7104.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 4670.17 frames. ], batch size: 22, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (2/8) Epoch 34, batch 400, train_loss[loss=3.325, NarTop10Accuracy=0.6587, over 5073.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7224, over 5118.95 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,690 INFO [trainer.py:765] (2/8) Epoch 34, batch 500, train_loss[loss=3.195, NarTop10Accuracy=0.686, over 5976.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7195, over 5401.09 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (2/8) Epoch 34, batch 600, train_loss[loss=2.803, NarTop10Accuracy=0.7612, over 5721.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7189, over 5660.21 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (2/8) Epoch 34, batch 700, train_loss[loss=3.131, NarTop10Accuracy=0.7014, over 5094.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7184, over 5732.83 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (2/8) Epoch 34, batch 800, train_loss[loss=2.815, NarTop10Accuracy=0.761, over 5070.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.721, over 5792.26 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (2/8) Epoch 34, batch 900, train_loss[loss=2.868, NarTop10Accuracy=0.7651, over 6651.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5822.76 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:01:25,342 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (2/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:01:34,092 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,625 INFO [trainer.py:765] (2/8) Epoch 34, batch 1000, train_loss[loss=3.226, NarTop10Accuracy=0.6779, over 6651.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 5914.66 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (2/8) Epoch 34, batch 1100, train_loss[loss=3.177, NarTop10Accuracy=0.6906, over 6822.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7173, over 5939.80 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (2/8) Epoch 34, batch 1200, train_loss[loss=2.89, NarTop10Accuracy=0.7534, over 7302.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 5928.57 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,814 INFO [trainer.py:765] (2/8) Epoch 34, batch 1300, train_loss[loss=2.795, NarTop10Accuracy=0.7805, over 5085.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 6001.04 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,950 INFO [trainer.py:765] (2/8) Epoch 34, batch 1400, train_loss[loss=3.151, NarTop10Accuracy=0.6864, over 5967.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 6007.26 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,823 INFO [trainer.py:765] (2/8) Epoch 34, batch 1500, train_loss[loss=3.081, NarTop10Accuracy=0.7109, over 5268.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7176, over 5938.22 frames. ], batch size: 51, lr: 2.50e-03 +2024-08-06 22:04:48,600 INFO [trainer.py:765] (2/8) Epoch 34, batch 1600, train_loss[loss=3.004, NarTop10Accuracy=0.7204, over 6873.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7164, over 5947.06 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (2/8) Epoch 34, batch 1700, train_loss[loss=3.027, NarTop10Accuracy=0.714, over 6615.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7185, over 5927.82 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 22:05:41,721 INFO [trainer.py:765] (2/8) Epoch 34, batch 1800, train_loss[loss=3.376, NarTop10Accuracy=0.6391, over 6999.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 5995.50 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,207 INFO [trainer.py:765] (2/8) Epoch 34, batch 1900, train_loss[loss=3.09, NarTop10Accuracy=0.7074, over 6078.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7118, over 6033.33 frames. 
], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (2/8) Epoch 34, batch 2000, train_loss[loss=3.127, NarTop10Accuracy=0.6987, over 6258.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5993.78 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (2/8) Epoch 34, batch 2100, train_loss[loss=3.406, NarTop10Accuracy=0.6368, over 4944.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7121, over 5980.02 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (2/8) Epoch 34, batch 2200, train_loss[loss=2.894, NarTop10Accuracy=0.7523, over 7422.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7123, over 6015.38 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (2/8) Epoch 34, batch 2300, train_loss[loss=2.844, NarTop10Accuracy=0.7634, over 5634.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7105, over 6032.38 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (2/8) Epoch 34, batch 2400, train_loss[loss=3.262, NarTop10Accuracy=0.6716, over 5040.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7122, over 5769.39 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (2/8) Epoch 34, batch 2500, train_loss[loss=2.731, NarTop10Accuracy=0.7907, over 5220.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7184, over 5478.16 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,693 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 22:09:52,641 INFO [trainer.py:765] (2/8) Epoch 35, batch 100, train_loss[loss=2.824, NarTop10Accuracy=0.7589, over 7200.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7161, over 2373.37 frames. ], batch size: 31, lr: 2.45e-03 +2024-08-06 22:10:29,698 INFO [trainer.py:765] (2/8) Epoch 35, batch 200, train_loss[loss=3.16, NarTop10Accuracy=0.6874, over 6813.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7132, over 3877.94 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (2/8) Epoch 35, batch 300, train_loss[loss=2.88, NarTop10Accuracy=0.7658, over 7071.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7187, over 4687.87 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (2/8) Epoch 35, batch 400, train_loss[loss=2.961, NarTop10Accuracy=0.7394, over 5220.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7194, over 5126.40 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,048 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (2/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,130 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (2/8) Epoch 35, batch 500, train_loss[loss=2.766, NarTop10Accuracy=0.7695, over 6036.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7208, over 5392.21 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,424 INFO [trainer.py:765] (2/8) Epoch 35, batch 600, train_loss[loss=3.32, NarTop10Accuracy=0.6623, over 5691.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7175, over 5649.78 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (2/8) Epoch 35, batch 700, train_loss[loss=2.647, NarTop10Accuracy=0.7904, over 5181.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7177, over 5732.09 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,383 INFO [trainer.py:765] (2/8) Epoch 35, batch 800, train_loss[loss=2.708, NarTop10Accuracy=0.7824, over 4329.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 5803.49 frames. ], batch size: 5, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (2/8) Epoch 35, batch 900, train_loss[loss=3.25, NarTop10Accuracy=0.6768, over 6159.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7197, over 5815.07 frames. ], batch size: 13, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (2/8) Epoch 35, batch 1000, train_loss[loss=2.814, NarTop10Accuracy=0.7696, over 6246.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5910.13 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (2/8) Epoch 35, batch 1100, train_loss[loss=2.963, NarTop10Accuracy=0.7186, over 6795.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7164, over 5946.16 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,484 INFO [trainer.py:765] (2/8) Epoch 35, batch 1200, train_loss[loss=2.925, NarTop10Accuracy=0.7415, over 7170.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7196, over 5931.71 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (2/8) Epoch 35, batch 1300, train_loss[loss=2.792, NarTop10Accuracy=0.7638, over 5010.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7213, over 5998.76 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,060 INFO [trainer.py:765] (2/8) Epoch 35, batch 1400, train_loss[loss=3.109, NarTop10Accuracy=0.7106, over 6093.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 6026.50 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (2/8) Epoch 35, batch 1500, train_loss[loss=3.129, NarTop10Accuracy=0.7077, over 5793.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 5961.97 frames. ], batch size: 51, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (2/8) Epoch 35, batch 1600, train_loss[loss=2.824, NarTop10Accuracy=0.7621, over 7143.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7159, over 5939.97 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,320 INFO [trainer.py:765] (2/8) Epoch 35, batch 1700, train_loss[loss=2.967, NarTop10Accuracy=0.7339, over 6627.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7145, over 5903.24 frames. ], batch size: 14, lr: 2.42e-03 +2024-08-06 22:19:23,702 INFO [trainer.py:765] (2/8) Epoch 35, batch 1800, train_loss[loss=3.434, NarTop10Accuracy=0.6349, over 7224.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7152, over 5973.46 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (2/8) Epoch 35, batch 1900, train_loss[loss=3.18, NarTop10Accuracy=0.6996, over 6177.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7153, over 6013.66 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (2/8) Epoch 35, batch 2000, train_loss[loss=3.102, NarTop10Accuracy=0.7069, over 6309.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7159, over 5997.80 frames. 
], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (2/8) Epoch 35, batch 2100, train_loss[loss=2.63, NarTop10Accuracy=0.7997, over 3960.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7158, over 5967.81 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (2/8) Epoch 35, batch 2200, train_loss[loss=2.932, NarTop10Accuracy=0.7485, over 7317.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7141, over 6002.21 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (2/8) Epoch 35, batch 2300, train_loss[loss=3.015, NarTop10Accuracy=0.7213, over 5742.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7142, over 6026.60 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (2/8) Epoch 35, batch 2400, train_loss[loss=3.334, NarTop10Accuracy=0.6639, over 5148.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5776.63 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,681 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (2/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,128 INFO [trainer.py:765] (2/8) Epoch 35, batch 2500, train_loss[loss=3.12, NarTop10Accuracy=0.7022, over 5124.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.719, over 5470.61 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:46,976 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 22:23:47,172 INFO [trainer.py:765] (2/8) Epoch 36, batch 100, train_loss[loss=3.059, NarTop10Accuracy=0.7112, over 7449.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7275, over 2366.89 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (2/8) Epoch 36, batch 200, train_loss[loss=2.862, NarTop10Accuracy=0.752, over 6792.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7223, over 3850.65 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (2/8) Epoch 36, batch 300, train_loss[loss=3.261, NarTop10Accuracy=0.6726, over 7017.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7209, over 4646.74 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,276 INFO [trainer.py:765] (2/8) Epoch 36, batch 400, train_loss[loss=2.947, NarTop10Accuracy=0.735, over 5049.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.723, over 5091.55 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (2/8) Epoch 36, batch 500, train_loss[loss=3.254, NarTop10Accuracy=0.6678, over 6138.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.722, over 5361.46 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,026 INFO [trainer.py:765] (2/8) Epoch 36, batch 600, train_loss[loss=2.978, NarTop10Accuracy=0.7292, over 5733.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7219, over 5640.98 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,990 INFO [trainer.py:765] (2/8) Epoch 36, batch 700, train_loss[loss=3.176, NarTop10Accuracy=0.6908, over 5034.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7215, over 5699.94 frames. 
], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,914 INFO [trainer.py:765] (2/8) Epoch 36, batch 800, train_loss[loss=3.323, NarTop10Accuracy=0.6503, over 5070.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7188, over 5751.03 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:28:17,811 INFO [trainer.py:765] (2/8) Epoch 36, batch 900, train_loss[loss=2.815, NarTop10Accuracy=0.7695, over 6546.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7214, over 5798.99 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:28:56,983 INFO [trainer.py:765] (2/8) Epoch 36, batch 1000, train_loss[loss=3.335, NarTop10Accuracy=0.6523, over 6078.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7191, over 5908.23 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (2/8) Epoch 36, batch 1100, train_loss[loss=2.853, NarTop10Accuracy=0.7586, over 6819.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7187, over 5931.98 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,681 INFO [trainer.py:765] (2/8) Epoch 36, batch 1200, train_loss[loss=2.997, NarTop10Accuracy=0.7194, over 7434.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7197, over 5934.57 frames. ], batch size: 32, lr: 2.36e-03 +2024-08-06 22:30:42,575 INFO [trainer.py:765] (2/8) Epoch 36, batch 1300, train_loss[loss=2.823, NarTop10Accuracy=0.7557, over 5058.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7196, over 5988.09 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (2/8) Epoch 36, batch 1400, train_loss[loss=3.078, NarTop10Accuracy=0.7089, over 6087.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5993.17 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (2/8) Epoch 36, batch 1500, train_loss[loss=3.422, NarTop10Accuracy=0.6427, over 5958.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5929.60 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 22:32:11,459 INFO [trainer.py:765] (2/8) Epoch 36, batch 1600, train_loss[loss=3.318, NarTop10Accuracy=0.6644, over 7089.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7201, over 5928.54 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,108 INFO [trainer.py:765] (2/8) Epoch 36, batch 1700, train_loss[loss=3.259, NarTop10Accuracy=0.6635, over 6630.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7186, over 5917.05 frames. ], batch size: 14, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (2/8) Epoch 36, batch 1800, train_loss[loss=3.272, NarTop10Accuracy=0.6691, over 6834.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5985.46 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,169 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (2/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (2/8) Epoch 36, batch 1900, train_loss[loss=2.958, NarTop10Accuracy=0.7315, over 6096.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 6045.77 frames. 
], batch size: 53, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (2/8) Epoch 36, batch 2000, train_loss[loss=3.217, NarTop10Accuracy=0.6904, over 6597.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7181, over 6021.83 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (2/8) Epoch 36, batch 2100, train_loss[loss=2.688, NarTop10Accuracy=0.7829, over 4713.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 5995.24 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (2/8) Epoch 36, batch 2200, train_loss[loss=3.418, NarTop10Accuracy=0.641, over 7068.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7152, over 6031.13 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (2/8) Epoch 36, batch 2300, train_loss[loss=3.357, NarTop10Accuracy=0.6531, over 5637.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7133, over 6032.18 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,600 INFO [trainer.py:765] (2/8) Epoch 36, batch 2400, train_loss[loss=3.27, NarTop10Accuracy=0.675, over 5187.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.716, over 5801.87 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (2/8) Epoch 36, batch 2500, train_loss[loss=2.675, NarTop10Accuracy=0.7842, over 5268.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5503.40 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:28,955 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 22:37:29,724 INFO [trainer.py:765] (2/8) Epoch 37, batch 100, train_loss[loss=2.895, NarTop10Accuracy=0.7505, over 7128.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7152, over 2366.79 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,272 INFO [trainer.py:765] (2/8) Epoch 37, batch 200, train_loss[loss=2.761, NarTop10Accuracy=0.774, over 6750.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 3855.18 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (2/8) Epoch 37, batch 300, train_loss[loss=3.075, NarTop10Accuracy=0.7024, over 7068.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 4653.94 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,306 INFO [trainer.py:765] (2/8) Epoch 37, batch 400, train_loss[loss=2.666, NarTop10Accuracy=0.7896, over 5097.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.723, over 5105.61 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,860 INFO [trainer.py:765] (2/8) Epoch 37, batch 500, train_loss[loss=3.337, NarTop10Accuracy=0.6568, over 6111.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7231, over 5387.56 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,332 INFO [trainer.py:765] (2/8) Epoch 37, batch 600, train_loss[loss=2.704, NarTop10Accuracy=0.793, over 5715.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7221, over 5642.39 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,615 INFO [trainer.py:765] (2/8) Epoch 37, batch 700, train_loss[loss=3.204, NarTop10Accuracy=0.6841, over 4299.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 5714.21 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:41:30,564 INFO [trainer.py:765] (2/8) Epoch 37, batch 800, train_loss[loss=2.755, NarTop10Accuracy=0.7697, over 5145.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5761.36 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,082 INFO [trainer.py:765] (2/8) Epoch 37, batch 900, train_loss[loss=2.977, NarTop10Accuracy=0.7258, over 6687.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7208, over 5804.59 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:42:38,267 INFO [trainer.py:765] (2/8) Epoch 37, batch 1000, train_loss[loss=3.253, NarTop10Accuracy=0.6706, over 6570.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5898.84 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:43:15,906 INFO [trainer.py:765] (2/8) Epoch 37, batch 1100, train_loss[loss=2.964, NarTop10Accuracy=0.7328, over 7071.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5933.70 frames. ], batch size: 18, lr: 2.30e-03 +2024-08-06 22:43:47,739 INFO [trainer.py:765] (2/8) Epoch 37, batch 1200, train_loss[loss=2.919, NarTop10Accuracy=0.7424, over 7563.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 5924.00 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,753 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (2/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,783 INFO [trainer.py:765] (2/8) Epoch 37, batch 1300, train_loss[loss=2.699, NarTop10Accuracy=0.7833, over 5064.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7208, over 5998.50 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,387 INFO [trainer.py:765] (2/8) Epoch 37, batch 1400, train_loss[loss=2.767, NarTop10Accuracy=0.7826, over 6066.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7217, over 6013.38 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,511 INFO [trainer.py:765] (2/8) Epoch 37, batch 1500, train_loss[loss=2.976, NarTop10Accuracy=0.734, over 5655.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7189, over 5947.98 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:46:08,437 INFO [trainer.py:765] (2/8) Epoch 37, batch 1600, train_loss[loss=3.313, NarTop10Accuracy=0.6631, over 7131.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 5924.64 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,186 INFO [trainer.py:765] (2/8) Epoch 37, batch 1700, train_loss[loss=3.323, NarTop10Accuracy=0.6567, over 6189.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7178, over 5900.92 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 22:47:01,792 INFO [trainer.py:765] (2/8) Epoch 37, batch 1800, train_loss[loss=2.9, NarTop10Accuracy=0.7436, over 6939.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5975.70 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:47:28,311 INFO [trainer.py:765] (2/8) Epoch 37, batch 1900, train_loss[loss=3.105, NarTop10Accuracy=0.7033, over 6480.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7187, over 6002.84 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:47:53,924 INFO [trainer.py:765] (2/8) Epoch 37, batch 2000, train_loss[loss=3.217, NarTop10Accuracy=0.6858, over 6024.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 5989.86 frames. 
], batch size: 54, lr: 2.29e-03 +2024-08-06 22:48:19,325 INFO [trainer.py:765] (2/8) Epoch 37, batch 2100, train_loss[loss=2.945, NarTop10Accuracy=0.7345, over 5034.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7187, over 5972.78 frames. ], batch size: 5, lr: 2.29e-03 +2024-08-06 22:48:44,707 INFO [trainer.py:765] (2/8) Epoch 37, batch 2200, train_loss[loss=2.914, NarTop10Accuracy=0.7458, over 7257.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7171, over 6026.64 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,912 INFO [trainer.py:765] (2/8) Epoch 37, batch 2300, train_loss[loss=2.846, NarTop10Accuracy=0.7625, over 5781.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7172, over 6043.25 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,318 INFO [trainer.py:765] (2/8) Epoch 37, batch 2400, train_loss[loss=3.272, NarTop10Accuracy=0.6744, over 5070.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7194, over 5778.10 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,860 INFO [trainer.py:765] (2/8) Epoch 37, batch 2500, train_loss[loss=3.113, NarTop10Accuracy=0.6913, over 5166.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7267, over 5490.62 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:17,816 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 22:51:16,152 INFO [trainer.py:765] (2/8) Epoch 38, batch 100, train_loss[loss=2.951, NarTop10Accuracy=0.7369, over 7443.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7232, over 2363.44 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,014 INFO [trainer.py:765] (2/8) Epoch 38, batch 200, train_loss[loss=3.276, NarTop10Accuracy=0.6712, over 6789.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7218, over 3867.62 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,202 INFO [trainer.py:765] (2/8) Epoch 38, batch 300, train_loss[loss=2.959, NarTop10Accuracy=0.7384, over 7011.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7184, over 4666.06 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (2/8) Epoch 38, batch 400, train_loss[loss=3.168, NarTop10Accuracy=0.6886, over 5052.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 5114.42 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,229 INFO [trainer.py:765] (2/8) Epoch 38, batch 500, train_loss[loss=2.856, NarTop10Accuracy=0.7572, over 6042.00 frames. ], tot_loss[loss=2.982, NarTop10Accuracy=0.7288, over 5398.97 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,497 INFO [trainer.py:765] (2/8) Epoch 38, batch 600, train_loss[loss=3.1, NarTop10Accuracy=0.7069, over 5877.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.7251, over 5651.15 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,002 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (2/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,657 INFO [trainer.py:765] (2/8) Epoch 38, batch 700, train_loss[loss=2.818, NarTop10Accuracy=0.7523, over 5004.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7249, over 5721.99 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,937 INFO [trainer.py:765] (2/8) Epoch 38, batch 800, train_loss[loss=2.869, NarTop10Accuracy=0.7538, over 5139.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 5777.74 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,704 INFO [trainer.py:765] (2/8) Epoch 38, batch 900, train_loss[loss=2.836, NarTop10Accuracy=0.758, over 6291.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7219, over 5785.97 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,090 INFO [trainer.py:765] (2/8) Epoch 38, batch 1000, train_loss[loss=3.257, NarTop10Accuracy=0.6679, over 6045.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7212, over 5891.53 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:57:08,990 INFO [trainer.py:765] (2/8) Epoch 38, batch 1100, train_loss[loss=3.132, NarTop10Accuracy=0.6934, over 6984.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7175, over 5914.24 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,661 INFO [trainer.py:765] (2/8) Epoch 38, batch 1200, train_loss[loss=2.889, NarTop10Accuracy=0.7539, over 7182.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7184, over 5896.53 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,545 INFO [trainer.py:765] (2/8) Epoch 38, batch 1300, train_loss[loss=3.008, NarTop10Accuracy=0.6956, over 5013.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7176, over 5972.42 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (2/8) Epoch 38, batch 1400, train_loss[loss=2.921, NarTop10Accuracy=0.7486, over 6102.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5984.32 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,853 INFO [trainer.py:765] (2/8) Epoch 38, batch 1500, train_loss[loss=3.568, NarTop10Accuracy=0.6108, over 6207.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7183, over 5914.30 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,644 INFO [trainer.py:765] (2/8) Epoch 38, batch 1600, train_loss[loss=3.301, NarTop10Accuracy=0.6616, over 7074.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7171, over 5905.42 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,315 INFO [trainer.py:765] (2/8) Epoch 38, batch 1700, train_loss[loss=3.039, NarTop10Accuracy=0.7213, over 6063.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7143, over 5910.93 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 23:00:43,763 INFO [trainer.py:765] (2/8) Epoch 38, batch 1800, train_loss[loss=3.378, NarTop10Accuracy=0.6496, over 7062.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7163, over 5980.02 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,192 INFO [trainer.py:765] (2/8) Epoch 38, batch 1900, train_loss[loss=3.388, NarTop10Accuracy=0.6489, over 6213.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7155, over 6005.89 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (2/8) Epoch 38, batch 2000, train_loss[loss=3.268, NarTop10Accuracy=0.6674, over 5799.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5978.34 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (2/8) Epoch 38, batch 2100, train_loss[loss=2.891, NarTop10Accuracy=0.7414, over 3936.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 5954.39 frames. 
], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,314 INFO [trainer.py:765] (2/8) Epoch 38, batch 2200, train_loss[loss=2.945, NarTop10Accuracy=0.735, over 7266.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7169, over 6000.37 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,419 INFO [trainer.py:765] (2/8) Epoch 38, batch 2300, train_loss[loss=2.762, NarTop10Accuracy=0.7761, over 5754.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7162, over 6009.26 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,348 INFO [trainer.py:765] (2/8) Epoch 38, batch 2400, train_loss[loss=2.682, NarTop10Accuracy=0.7911, over 5142.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7182, over 5775.12 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,824 INFO [trainer.py:765] (2/8) Epoch 38, batch 2500, train_loss[loss=3.26, NarTop10Accuracy=0.6765, over 5130.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.723, over 5487.08 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,318 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 23:04:58,940 INFO [trainer.py:765] (2/8) Epoch 39, batch 100, train_loss[loss=3.394, NarTop10Accuracy=0.6484, over 7224.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7281, over 2384.12 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,468 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (2/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (2/8) Epoch 39, batch 200, train_loss[loss=2.803, NarTop10Accuracy=0.7724, over 6954.00 frames. ], tot_loss[loss=2.998, NarTop10Accuracy=0.7255, over 3859.08 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (2/8) Epoch 39, batch 300, train_loss[loss=2.967, NarTop10Accuracy=0.7292, over 7140.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7275, over 4643.54 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,275 INFO [trainer.py:765] (2/8) Epoch 39, batch 400, train_loss[loss=2.871, NarTop10Accuracy=0.749, over 5145.00 frames. ], tot_loss[loss=2.995, NarTop10Accuracy=0.7268, over 5090.47 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (2/8) Epoch 39, batch 500, train_loss[loss=3.406, NarTop10Accuracy=0.6374, over 6093.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.726, over 5367.16 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (2/8) Epoch 39, batch 600, train_loss[loss=2.764, NarTop10Accuracy=0.778, over 5709.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7231, over 5648.57 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,695 INFO [trainer.py:765] (2/8) Epoch 39, batch 700, train_loss[loss=3.152, NarTop10Accuracy=0.6968, over 5202.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7199, over 5699.04 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (2/8) Epoch 39, batch 800, train_loss[loss=2.592, NarTop10Accuracy=0.8068, over 5010.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 5769.04 frames. 
], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:38,866 INFO [trainer.py:765] (2/8) Epoch 39, batch 900, train_loss[loss=3.436, NarTop10Accuracy=0.6434, over 6744.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 5793.83 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (2/8) Epoch 39, batch 1000, train_loss[loss=2.874, NarTop10Accuracy=0.7516, over 6819.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7212, over 5904.50 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (2/8) Epoch 39, batch 1100, train_loss[loss=2.821, NarTop10Accuracy=0.7717, over 6762.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7182, over 5944.60 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (2/8) Epoch 39, batch 1200, train_loss[loss=2.94, NarTop10Accuracy=0.7422, over 7035.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7209, over 5923.87 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,253 INFO [trainer.py:765] (2/8) Epoch 39, batch 1300, train_loss[loss=2.783, NarTop10Accuracy=0.7743, over 5178.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7235, over 5998.58 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,301 INFO [trainer.py:765] (2/8) Epoch 39, batch 1400, train_loss[loss=3.012, NarTop10Accuracy=0.7213, over 6153.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7221, over 6037.97 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (2/8) Epoch 39, batch 1500, train_loss[loss=3.541, NarTop10Accuracy=0.6184, over 6156.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7226, over 5971.99 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (2/8) Epoch 39, batch 1600, train_loss[loss=2.913, NarTop10Accuracy=0.7349, over 6975.00 frames. ], tot_loss[loss=3.005, NarTop10Accuracy=0.7245, over 5932.93 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (2/8) Epoch 39, batch 1700, train_loss[loss=3.317, NarTop10Accuracy=0.6572, over 6711.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7177, over 5920.98 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 23:14:30,768 INFO [trainer.py:765] (2/8) Epoch 39, batch 1800, train_loss[loss=2.834, NarTop10Accuracy=0.7585, over 7290.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5972.47 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (2/8) Epoch 39, batch 1900, train_loss[loss=3.022, NarTop10Accuracy=0.7265, over 6264.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 6020.54 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,750 INFO [trainer.py:765] (2/8) Epoch 39, batch 2000, train_loss[loss=3.234, NarTop10Accuracy=0.6689, over 5949.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7201, over 5989.24 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (2/8) Epoch 39, batch 2100, train_loss[loss=3.242, NarTop10Accuracy=0.6708, over 4749.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7204, over 5962.05 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (2/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,157 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (2/8) Epoch 39, batch 2200, train_loss[loss=3.154, NarTop10Accuracy=0.696, over 7155.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.72, over 6007.73 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (2/8) Epoch 39, batch 2300, train_loss[loss=2.78, NarTop10Accuracy=0.7764, over 5805.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7166, over 6021.53 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (2/8) Epoch 39, batch 2400, train_loss[loss=2.753, NarTop10Accuracy=0.7723, over 5088.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7222, over 5770.20 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (2/8) Epoch 39, batch 2500, train_loss[loss=2.904, NarTop10Accuracy=0.7388, over 5181.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7265, over 5475.22 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,329 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 23:18:48,947 INFO [trainer.py:765] (2/8) Epoch 40, batch 100, train_loss[loss=3.043, NarTop10Accuracy=0.7177, over 7452.00 frames. ], tot_loss[loss=2.996, NarTop10Accuracy=0.7261, over 2386.02 frames. ], batch size: 33, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (2/8) Epoch 40, batch 200, train_loss[loss=2.743, NarTop10Accuracy=0.7797, over 6933.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7275, over 3859.19 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,188 INFO [trainer.py:765] (2/8) Epoch 40, batch 300, train_loss[loss=2.74, NarTop10Accuracy=0.7675, over 7275.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7223, over 4653.45 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (2/8) Epoch 40, batch 400, train_loss[loss=2.962, NarTop10Accuracy=0.7348, over 5055.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5111.17 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (2/8) Epoch 40, batch 500, train_loss[loss=2.758, NarTop10Accuracy=0.7779, over 6099.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7232, over 5378.39 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,880 INFO [trainer.py:765] (2/8) Epoch 40, batch 600, train_loss[loss=2.913, NarTop10Accuracy=0.7406, over 5652.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7257, over 5631.56 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (2/8) Epoch 40, batch 700, train_loss[loss=3.067, NarTop10Accuracy=0.7095, over 5019.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7229, over 5720.15 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,753 INFO [trainer.py:765] (2/8) Epoch 40, batch 800, train_loss[loss=2.722, NarTop10Accuracy=0.7772, over 5067.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 5782.69 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (2/8) Epoch 40, batch 900, train_loss[loss=3.265, NarTop10Accuracy=0.6697, over 6282.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5802.91 frames. 
], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (2/8) Epoch 40, batch 1000, train_loss[loss=3.247, NarTop10Accuracy=0.6647, over 6261.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7192, over 5909.27 frames. ], batch size: 13, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (2/8) Epoch 40, batch 1100, train_loss[loss=2.768, NarTop10Accuracy=0.7715, over 6879.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7203, over 5931.54 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (2/8) Epoch 40, batch 1200, train_loss[loss=2.867, NarTop10Accuracy=0.7521, over 7041.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.721, over 5935.84 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (2/8) Epoch 40, batch 1300, train_loss[loss=2.823, NarTop10Accuracy=0.7659, over 5031.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7224, over 6007.05 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,385 INFO [trainer.py:765] (2/8) Epoch 40, batch 1400, train_loss[loss=2.833, NarTop10Accuracy=0.7686, over 6015.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 6010.38 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (2/8) Epoch 40, batch 1500, train_loss[loss=3.243, NarTop10Accuracy=0.6776, over 5703.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5940.99 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,420 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (2/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 30120MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,381 INFO [trainer.py:765] (2/8) Epoch 40, batch 1600, train_loss[loss=2.892, NarTop10Accuracy=0.7482, over 6927.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7228, over 5933.03 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (2/8) Epoch 40, batch 1700, train_loss[loss=3.315, NarTop10Accuracy=0.6573, over 6150.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7216, over 5918.01 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 23:28:12,578 INFO [trainer.py:765] (2/8) Epoch 40, batch 1800, train_loss[loss=3.08, NarTop10Accuracy=0.711, over 7260.00 frames. ], tot_loss[loss=3.002, NarTop10Accuracy=0.7244, over 5970.24 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,908 INFO [trainer.py:765] (2/8) Epoch 40, batch 1900, train_loss[loss=3.183, NarTop10Accuracy=0.6875, over 5928.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7236, over 6015.93 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,444 INFO [trainer.py:765] (2/8) Epoch 40, batch 2000, train_loss[loss=3.516, NarTop10Accuracy=0.6232, over 6153.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7235, over 5994.93 frames. ], batch size: 51, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (2/8) Epoch 40, batch 2100, train_loss[loss=2.884, NarTop10Accuracy=0.7569, over 4035.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7229, over 5975.79 frames. 
], batch size: 4, lr: 2.11e-03 +2024-08-06 23:29:54,939 INFO [trainer.py:765] (2/8) Epoch 40, batch 2200, train_loss[loss=3.168, NarTop10Accuracy=0.6909, over 7416.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7203, over 6018.34 frames. ], batch size: 31, lr: 2.11e-03 +2024-08-06 23:30:20,012 INFO [trainer.py:765] (2/8) Epoch 40, batch 2300, train_loss[loss=2.843, NarTop10Accuracy=0.7537, over 5703.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.72, over 6022.31 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,296 INFO [trainer.py:765] (2/8) Epoch 40, batch 2400, train_loss[loss=2.729, NarTop10Accuracy=0.7802, over 5223.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7215, over 5766.36 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (2/8) Epoch 40, batch 2500, train_loss[loss=2.914, NarTop10Accuracy=0.7318, over 5154.00 frames. ], tot_loss[loss=2.984, NarTop10Accuracy=0.728, over 5468.88 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,883 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 23:31:27,886 INFO [trainer.py:1069] (2/8) Done! diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-3 b/libritts-r/log/log-train-2024-08-06-14-23-41-3 new file mode 100644 index 0000000000000000000000000000000000000000..35452ff70712fac1bbf1889cde52efeaf7c58fc1 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-3 @@ -0,0 +1,1260 @@ +2024-08-06 14:23:41,782 INFO [trainer.py:870] (3/8) Training started +2024-08-06 14:23:41,783 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 14:23:41,783 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,784 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 14:23:42,485 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,485 INFO [checkpoint.py:112] (3/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,413 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 14:23:49,643 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 14:23:49,644 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 14:23:49,645 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 14:23:49,645 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 14:23:49,646 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,272 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 14:23:50,272 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 14:23:50,606 INFO [datamodule.py:388] (3/8) About to create dev dataloader +2024-08-06 14:24:38,248 INFO [trainer.py:765] (3/8) Epoch 1, batch 100, train_loss[loss=106.9, NarTop10Accuracy=0.02041, over 7170.00 frames. ], tot_loss[loss=73.89, NarTop10Accuracy=0.04719, over 2363.47 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (3/8) Epoch 1, batch 200, train_loss[loss=131.2, NarTop10Accuracy=0.01516, over 6537.00 frames. ], tot_loss[loss=97.39, NarTop10Accuracy=0.04203, over 3863.13 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,110 INFO [trainer.py:765] (3/8) Epoch 1, batch 300, train_loss[loss=108, NarTop10Accuracy=0.02065, over 7017.00 frames. ], tot_loss[loss=85.18, NarTop10Accuracy=0.04247, over 4669.78 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,482 INFO [trainer.py:765] (3/8) Epoch 1, batch 400, train_loss[loss=51.92, NarTop10Accuracy=0.02073, over 5241.00 frames. ], tot_loss[loss=67.91, NarTop10Accuracy=0.04635, over 5113.19 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,357 INFO [trainer.py:765] (3/8) Epoch 1, batch 500, train_loss[loss=14.39, NarTop10Accuracy=0.0267, over 6054.00 frames. ], tot_loss[loss=48.97, NarTop10Accuracy=0.05035, over 5400.32 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,000 INFO [trainer.py:765] (3/8) Epoch 1, batch 600, train_loss[loss=6.159, NarTop10Accuracy=0.2001, over 5730.00 frames. ], tot_loss[loss=33.44, NarTop10Accuracy=0.0552, over 5655.01 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (3/8) Epoch 1, batch 700, train_loss[loss=6.731, NarTop10Accuracy=0.1201, over 4356.00 frames. ], tot_loss[loss=23.39, NarTop10Accuracy=0.06443, over 5733.59 frames. ], batch size: 5, lr: 2.99e-02 +2024-08-06 14:28:08,831 INFO [trainer.py:765] (3/8) Epoch 1, batch 800, train_loss[loss=6.471, NarTop10Accuracy=0.1302, over 5175.00 frames. ], tot_loss[loss=17.17, NarTop10Accuracy=0.08574, over 5774.24 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,759 INFO [trainer.py:765] (3/8) Epoch 1, batch 900, train_loss[loss=5.776, NarTop10Accuracy=0.1791, over 6234.00 frames. ], tot_loss[loss=12.81, NarTop10Accuracy=0.1125, over 5782.77 frames. 
], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,585 INFO [trainer.py:765] (3/8) Epoch 1, batch 1000, train_loss[loss=5.751, NarTop10Accuracy=0.1824, over 6231.00 frames. ], tot_loss[loss=10.12, NarTop10Accuracy=0.1331, over 5890.08 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 14:29:42,824 INFO [trainer.py:765] (3/8) Epoch 1, batch 1100, train_loss[loss=5.623, NarTop10Accuracy=0.2068, over 6945.00 frames. ], tot_loss[loss=8.423, NarTop10Accuracy=0.1521, over 5927.44 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,467 INFO [trainer.py:765] (3/8) Epoch 1, batch 1200, train_loss[loss=5.859, NarTop10Accuracy=0.1813, over 7272.00 frames. ], tot_loss[loss=7.355, NarTop10Accuracy=0.17, over 5922.68 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,746 INFO [trainer.py:765] (3/8) Epoch 1, batch 1300, train_loss[loss=5.288, NarTop10Accuracy=0.2874, over 4920.00 frames. ], tot_loss[loss=6.68, NarTop10Accuracy=0.1871, over 5984.21 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 14:31:18,143 INFO [trainer.py:765] (3/8) Epoch 1, batch 1400, train_loss[loss=5.618, NarTop10Accuracy=0.2033, over 6171.00 frames. ], tot_loss[loss=6.252, NarTop10Accuracy=0.197, over 6019.76 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,025 INFO [trainer.py:765] (3/8) Epoch 1, batch 1500, train_loss[loss=5.769, NarTop10Accuracy=0.187, over 6066.00 frames. ], tot_loss[loss=5.979, NarTop10Accuracy=0.2074, over 5962.70 frames. ], batch size: 51, lr: 2.94e-02 +2024-08-06 14:32:13,691 INFO [trainer.py:765] (3/8) Epoch 1, batch 1600, train_loss[loss=5.613, NarTop10Accuracy=0.2, over 6984.00 frames. ], tot_loss[loss=5.787, NarTop10Accuracy=0.2178, over 5935.75 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,198 INFO [trainer.py:765] (3/8) Epoch 1, batch 1700, train_loss[loss=5.363, NarTop10Accuracy=0.2575, over 6273.00 frames. ], tot_loss[loss=5.665, NarTop10Accuracy=0.2248, over 5939.13 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 14:33:06,498 INFO [trainer.py:765] (3/8) Epoch 1, batch 1800, train_loss[loss=5.5, NarTop10Accuracy=0.2237, over 7002.00 frames. ], tot_loss[loss=5.573, NarTop10Accuracy=0.2331, over 6006.15 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,623 INFO [trainer.py:765] (3/8) Epoch 1, batch 1900, train_loss[loss=5.66, NarTop10Accuracy=0.1961, over 6279.00 frames. ], tot_loss[loss=5.503, NarTop10Accuracy=0.2415, over 6042.82 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 14:33:58,014 INFO [trainer.py:765] (3/8) Epoch 1, batch 2000, train_loss[loss=5.471, NarTop10Accuracy=0.2504, over 6885.00 frames. ], tot_loss[loss=5.448, NarTop10Accuracy=0.2492, over 6018.24 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (3/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 26959MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,061 INFO [trainer.py:765] (3/8) Epoch 1, batch 2100, train_loss[loss=5.002, NarTop10Accuracy=0.3485, over 4056.00 frames. ], tot_loss[loss=5.39, NarTop10Accuracy=0.2587, over 5994.28 frames. 
], batch size: 4, lr: 2.88e-02 +2024-08-06 14:34:57,305 INFO [trainer.py:765] (3/8) Epoch 1, batch 2200, train_loss[loss=5.54, NarTop10Accuracy=0.2302, over 7410.00 frames. ], tot_loss[loss=5.35, NarTop10Accuracy=0.2642, over 6026.31 frames. ], batch size: 32, lr: 2.87e-02 +2024-08-06 14:35:22,457 INFO [trainer.py:765] (3/8) Epoch 1, batch 2300, train_loss[loss=5.268, NarTop10Accuracy=0.2786, over 5592.00 frames. ], tot_loss[loss=5.344, NarTop10Accuracy=0.265, over 6026.29 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,816 INFO [trainer.py:765] (3/8) Epoch 1, batch 2400, train_loss[loss=5.384, NarTop10Accuracy=0.2511, over 5064.00 frames. ], tot_loss[loss=5.284, NarTop10Accuracy=0.2759, over 5767.57 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,409 INFO [trainer.py:765] (3/8) Epoch 1, batch 2500, train_loss[loss=5.011, NarTop10Accuracy=0.3322, over 5235.00 frames. ], tot_loss[loss=5.221, NarTop10Accuracy=0.2875, over 5460.03 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:30,283 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 14:37:29,669 INFO [trainer.py:765] (3/8) Epoch 2, batch 100, train_loss[loss=4.971, NarTop10Accuracy=0.3324, over 7089.00 frames. ], tot_loss[loss=5.18, NarTop10Accuracy=0.296, over 2367.74 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,015 INFO [trainer.py:765] (3/8) Epoch 2, batch 200, train_loss[loss=5.128, NarTop10Accuracy=0.3059, over 6870.00 frames. ], tot_loss[loss=5.157, NarTop10Accuracy=0.3006, over 3852.99 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (3/8) Epoch 2, batch 300, train_loss[loss=5.161, NarTop10Accuracy=0.3006, over 7041.00 frames. ], tot_loss[loss=5.131, NarTop10Accuracy=0.305, over 4660.18 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:06,999 INFO [trainer.py:765] (3/8) Epoch 2, batch 400, train_loss[loss=4.877, NarTop10Accuracy=0.3572, over 5094.00 frames. ], tot_loss[loss=5.107, NarTop10Accuracy=0.3088, over 5099.80 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,119 INFO [trainer.py:765] (3/8) Epoch 2, batch 500, train_loss[loss=5.073, NarTop10Accuracy=0.3074, over 6066.00 frames. ], tot_loss[loss=5.068, NarTop10Accuracy=0.3164, over 5361.27 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,083 INFO [trainer.py:765] (3/8) Epoch 2, batch 600, train_loss[loss=4.836, NarTop10Accuracy=0.3649, over 5637.00 frames. ], tot_loss[loss=5.048, NarTop10Accuracy=0.3204, over 5643.57 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (3/8) Epoch 2, batch 700, train_loss[loss=4.947, NarTop10Accuracy=0.3464, over 5268.00 frames. ], tot_loss[loss=5.033, NarTop10Accuracy=0.3226, over 5706.94 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,514 INFO [trainer.py:765] (3/8) Epoch 2, batch 800, train_loss[loss=5.185, NarTop10Accuracy=0.2918, over 5154.00 frames. ], tot_loss[loss=5.028, NarTop10Accuracy=0.3236, over 5772.89 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 14:41:54,404 INFO [trainer.py:765] (3/8) Epoch 2, batch 900, train_loss[loss=4.671, NarTop10Accuracy=0.3893, over 6219.00 frames. ], tot_loss[loss=4.988, NarTop10Accuracy=0.3313, over 5787.03 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 14:42:23,902 INFO [trainer.py:765] (3/8) Epoch 2, batch 1000, train_loss[loss=4.658, NarTop10Accuracy=0.3945, over 6648.00 frames. ], tot_loss[loss=4.949, NarTop10Accuracy=0.3383, over 5890.09 frames. 
], batch size: 14, lr: 2.66e-02 +2024-08-06 14:42:56,256 INFO [trainer.py:765] (3/8) Epoch 2, batch 1100, train_loss[loss=5.171, NarTop10Accuracy=0.294, over 6819.00 frames. ], tot_loss[loss=4.937, NarTop10Accuracy=0.3409, over 5926.60 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,186 INFO [trainer.py:765] (3/8) Epoch 2, batch 1200, train_loss[loss=4.714, NarTop10Accuracy=0.3763, over 7338.00 frames. ], tot_loss[loss=4.903, NarTop10Accuracy=0.3468, over 5929.22 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,345 INFO [trainer.py:765] (3/8) Epoch 2, batch 1300, train_loss[loss=4.851, NarTop10Accuracy=0.3542, over 5190.00 frames. ], tot_loss[loss=4.862, NarTop10Accuracy=0.355, over 5989.94 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,727 INFO [trainer.py:765] (3/8) Epoch 2, batch 1400, train_loss[loss=4.706, NarTop10Accuracy=0.38, over 6114.00 frames. ], tot_loss[loss=4.854, NarTop10Accuracy=0.3564, over 6032.30 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,441 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (3/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 26959MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (3/8) Epoch 2, batch 1500, train_loss[loss=4.851, NarTop10Accuracy=0.349, over 5994.00 frames. ], tot_loss[loss=4.83, NarTop10Accuracy=0.3609, over 5948.08 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,659 INFO [trainer.py:765] (3/8) Epoch 2, batch 1600, train_loss[loss=4.608, NarTop10Accuracy=0.4074, over 7395.00 frames. ], tot_loss[loss=4.805, NarTop10Accuracy=0.366, over 5922.73 frames. ], batch size: 23, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (3/8) Epoch 2, batch 1700, train_loss[loss=4.846, NarTop10Accuracy=0.3569, over 6678.00 frames. ], tot_loss[loss=4.798, NarTop10Accuracy=0.3671, over 5907.39 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (3/8) Epoch 2, batch 1800, train_loss[loss=4.67, NarTop10Accuracy=0.3978, over 7092.00 frames. ], tot_loss[loss=4.777, NarTop10Accuracy=0.371, over 5977.57 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (3/8) Epoch 2, batch 1900, train_loss[loss=4.66, NarTop10Accuracy=0.395, over 5892.00 frames. ], tot_loss[loss=4.754, NarTop10Accuracy=0.3756, over 6013.67 frames. ], batch size: 51, lr: 2.55e-02 +2024-08-06 14:47:23,233 INFO [trainer.py:765] (3/8) Epoch 2, batch 2000, train_loss[loss=4.821, NarTop10Accuracy=0.3679, over 6018.00 frames. ], tot_loss[loss=4.729, NarTop10Accuracy=0.3801, over 5998.91 frames. ], batch size: 50, lr: 2.54e-02 +2024-08-06 14:47:48,588 INFO [trainer.py:765] (3/8) Epoch 2, batch 2100, train_loss[loss=4.851, NarTop10Accuracy=0.3428, over 3855.00 frames. ], tot_loss[loss=4.711, NarTop10Accuracy=0.3835, over 5973.79 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,764 INFO [trainer.py:765] (3/8) Epoch 2, batch 2200, train_loss[loss=4.723, NarTop10Accuracy=0.377, over 7227.00 frames. ], tot_loss[loss=4.681, NarTop10Accuracy=0.3893, over 6017.31 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (3/8) Epoch 2, batch 2300, train_loss[loss=4.805, NarTop10Accuracy=0.3645, over 5763.00 frames. ], tot_loss[loss=4.682, NarTop10Accuracy=0.389, over 6034.02 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,319 INFO [trainer.py:765] (3/8) Epoch 2, batch 2400, train_loss[loss=4.463, NarTop10Accuracy=0.4349, over 5268.00 frames. ], tot_loss[loss=4.638, NarTop10Accuracy=0.3973, over 5785.51 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (3/8) Epoch 2, batch 2500, train_loss[loss=4.745, NarTop10Accuracy=0.3686, over 5265.00 frames. ], tot_loss[loss=4.616, NarTop10Accuracy=0.4014, over 5490.06 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,794 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 14:50:51,117 INFO [trainer.py:765] (3/8) Epoch 3, batch 100, train_loss[loss=4.759, NarTop10Accuracy=0.3726, over 7227.00 frames. ], tot_loss[loss=4.591, NarTop10Accuracy=0.4066, over 2359.95 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,388 INFO [trainer.py:765] (3/8) Epoch 3, batch 200, train_loss[loss=4.715, NarTop10Accuracy=0.3818, over 6876.00 frames. ], tot_loss[loss=4.538, NarTop10Accuracy=0.4172, over 3848.41 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,954 INFO [trainer.py:765] (3/8) Epoch 3, batch 300, train_loss[loss=4.663, NarTop10Accuracy=0.3947, over 7188.00 frames. ], tot_loss[loss=4.522, NarTop10Accuracy=0.42, over 4661.20 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (3/8) Epoch 3, batch 400, train_loss[loss=4.549, NarTop10Accuracy=0.4217, over 4932.00 frames. ], tot_loss[loss=4.496, NarTop10Accuracy=0.4252, over 5110.33 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (3/8) Epoch 3, batch 500, train_loss[loss=4.401, NarTop10Accuracy=0.4367, over 6081.00 frames. ], tot_loss[loss=4.492, NarTop10Accuracy=0.4256, over 5384.69 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,552 INFO [trainer.py:765] (3/8) Epoch 3, batch 600, train_loss[loss=4.214, NarTop10Accuracy=0.4836, over 5763.00 frames. ], tot_loss[loss=4.469, NarTop10Accuracy=0.4306, over 5640.79 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,466 INFO [trainer.py:765] (3/8) Epoch 3, batch 700, train_loss[loss=4.287, NarTop10Accuracy=0.4699, over 4260.00 frames. ], tot_loss[loss=4.442, NarTop10Accuracy=0.4363, over 5707.50 frames. ], batch size: 5, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (3/8) Epoch 3, batch 800, train_loss[loss=4.179, NarTop10Accuracy=0.4841, over 4866.00 frames. ], tot_loss[loss=4.419, NarTop10Accuracy=0.4409, over 5764.05 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,686 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (3/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 14:55:07,182 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,051 INFO [trainer.py:765] (3/8) Epoch 3, batch 900, train_loss[loss=4.13, NarTop10Accuracy=0.4954, over 6222.00 frames. ], tot_loss[loss=4.39, NarTop10Accuracy=0.4465, over 5777.05 frames. 
], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,957 INFO [trainer.py:765] (3/8) Epoch 3, batch 1000, train_loss[loss=4.184, NarTop10Accuracy=0.4856, over 6600.00 frames. ], tot_loss[loss=4.37, NarTop10Accuracy=0.4504, over 5871.23 frames. ], batch size: 14, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (3/8) Epoch 3, batch 1100, train_loss[loss=4.56, NarTop10Accuracy=0.4058, over 6720.00 frames. ], tot_loss[loss=4.352, NarTop10Accuracy=0.454, over 5919.92 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,376 INFO [trainer.py:765] (3/8) Epoch 3, batch 1200, train_loss[loss=4.429, NarTop10Accuracy=0.4312, over 7431.00 frames. ], tot_loss[loss=4.335, NarTop10Accuracy=0.457, over 5932.99 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,630 INFO [trainer.py:765] (3/8) Epoch 3, batch 1300, train_loss[loss=4.24, NarTop10Accuracy=0.478, over 5070.00 frames. ], tot_loss[loss=4.306, NarTop10Accuracy=0.4623, over 5998.69 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 14:58:22,899 INFO [trainer.py:765] (3/8) Epoch 3, batch 1400, train_loss[loss=4.35, NarTop10Accuracy=0.459, over 6183.00 frames. ], tot_loss[loss=4.3, NarTop10Accuracy=0.4635, over 6019.73 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,854 INFO [trainer.py:765] (3/8) Epoch 3, batch 1500, train_loss[loss=4.35, NarTop10Accuracy=0.4504, over 6507.00 frames. ], tot_loss[loss=4.276, NarTop10Accuracy=0.4685, over 5960.82 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 14:59:18,714 INFO [trainer.py:765] (3/8) Epoch 3, batch 1600, train_loss[loss=3.903, NarTop10Accuracy=0.5533, over 6969.00 frames. ], tot_loss[loss=4.259, NarTop10Accuracy=0.4715, over 5939.72 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,951 INFO [trainer.py:765] (3/8) Epoch 3, batch 1700, train_loss[loss=4.173, NarTop10Accuracy=0.489, over 6273.00 frames. ], tot_loss[loss=4.234, NarTop10Accuracy=0.4766, over 5910.81 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 15:00:12,496 INFO [trainer.py:765] (3/8) Epoch 3, batch 1800, train_loss[loss=3.973, NarTop10Accuracy=0.5326, over 7131.00 frames. ], tot_loss[loss=4.214, NarTop10Accuracy=0.4808, over 5985.72 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,947 INFO [trainer.py:765] (3/8) Epoch 3, batch 1900, train_loss[loss=4.658, NarTop10Accuracy=0.3937, over 6606.00 frames. ], tot_loss[loss=4.195, NarTop10Accuracy=0.4848, over 6033.08 frames. ], batch size: 51, lr: 2.16e-02 +2024-08-06 15:01:04,605 INFO [trainer.py:765] (3/8) Epoch 3, batch 2000, train_loss[loss=4.423, NarTop10Accuracy=0.4398, over 5751.00 frames. ], tot_loss[loss=4.167, NarTop10Accuracy=0.4903, over 6011.50 frames. ], batch size: 51, lr: 2.15e-02 +2024-08-06 15:01:29,897 INFO [trainer.py:765] (3/8) Epoch 3, batch 2100, train_loss[loss=3.921, NarTop10Accuracy=0.5438, over 3936.00 frames. ], tot_loss[loss=4.143, NarTop10Accuracy=0.495, over 5980.16 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (3/8) Epoch 3, batch 2200, train_loss[loss=3.935, NarTop10Accuracy=0.5495, over 7524.00 frames. ], tot_loss[loss=4.12, NarTop10Accuracy=0.5002, over 5998.18 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (3/8) Epoch 3, batch 2300, train_loss[loss=4.43, NarTop10Accuracy=0.4342, over 5583.00 frames. ], tot_loss[loss=4.134, NarTop10Accuracy=0.4971, over 6008.71 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,664 INFO [trainer.py:765] (3/8) Epoch 3, batch 2400, train_loss[loss=4.217, NarTop10Accuracy=0.471, over 5049.00 frames. ], tot_loss[loss=4.1, NarTop10Accuracy=0.5041, over 5756.50 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,235 INFO [trainer.py:765] (3/8) Epoch 3, batch 2500, train_loss[loss=3.711, NarTop10Accuracy=0.5866, over 5178.00 frames. ], tot_loss[loss=4.052, NarTop10Accuracy=0.5141, over 5464.40 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,358 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:04:28,131 INFO [trainer.py:765] (3/8) Epoch 4, batch 100, train_loss[loss=3.926, NarTop10Accuracy=0.5423, over 7152.00 frames. ], tot_loss[loss=4.029, NarTop10Accuracy=0.5185, over 2356.16 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,843 INFO [trainer.py:765] (3/8) Epoch 4, batch 200, train_loss[loss=3.758, NarTop10Accuracy=0.5806, over 6726.00 frames. ], tot_loss[loss=4.009, NarTop10Accuracy=0.523, over 3859.81 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,509 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,694 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:05:36,237 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,888 INFO [trainer.py:765] (3/8) Epoch 4, batch 300, train_loss[loss=3.786, NarTop10Accuracy=0.5727, over 7053.00 frames. ], tot_loss[loss=3.996, NarTop10Accuracy=0.5254, over 4657.93 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,123 INFO [trainer.py:765] (3/8) Epoch 4, batch 400, train_loss[loss=3.824, NarTop10Accuracy=0.5616, over 5145.00 frames. ], tot_loss[loss=4.008, NarTop10Accuracy=0.5235, over 5111.08 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (3/8) Epoch 4, batch 500, train_loss[loss=3.968, NarTop10Accuracy=0.5243, over 6000.00 frames. ], tot_loss[loss=3.987, NarTop10Accuracy=0.5275, over 5387.25 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,817 INFO [trainer.py:765] (3/8) Epoch 4, batch 600, train_loss[loss=3.773, NarTop10Accuracy=0.5702, over 5736.00 frames. ], tot_loss[loss=3.979, NarTop10Accuracy=0.5295, over 5651.51 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (3/8) Epoch 4, batch 700, train_loss[loss=4.209, NarTop10Accuracy=0.4689, over 5010.00 frames. ], tot_loss[loss=3.972, NarTop10Accuracy=0.5302, over 5723.87 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,430 INFO [trainer.py:765] (3/8) Epoch 4, batch 800, train_loss[loss=3.729, NarTop10Accuracy=0.5754, over 5055.00 frames. ], tot_loss[loss=3.959, NarTop10Accuracy=0.5326, over 5789.04 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,690 INFO [trainer.py:765] (3/8) Epoch 4, batch 900, train_loss[loss=3.626, NarTop10Accuracy=0.6057, over 6738.00 frames. ], tot_loss[loss=3.921, NarTop10Accuracy=0.5404, over 5799.40 frames. ], batch size: 14, lr: 1.90e-02 +2024-08-06 15:09:46,076 INFO [trainer.py:765] (3/8) Epoch 4, batch 1000, train_loss[loss=3.673, NarTop10Accuracy=0.6006, over 6141.00 frames. ], tot_loss[loss=3.917, NarTop10Accuracy=0.5419, over 5898.60 frames. 
], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,139 INFO [trainer.py:765] (3/8) Epoch 4, batch 1100, train_loss[loss=3.794, NarTop10Accuracy=0.5672, over 6867.00 frames. ], tot_loss[loss=3.913, NarTop10Accuracy=0.5428, over 5920.26 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (3/8) Epoch 4, batch 1200, train_loss[loss=4.321, NarTop10Accuracy=0.4597, over 7107.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5435, over 5924.51 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (3/8) Epoch 4, batch 1300, train_loss[loss=3.6, NarTop10Accuracy=0.6067, over 4395.00 frames. ], tot_loss[loss=3.87, NarTop10Accuracy=0.5511, over 5978.85 frames. ], batch size: 5, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (3/8) Epoch 4, batch 1400, train_loss[loss=3.66, NarTop10Accuracy=0.6048, over 6117.00 frames. ], tot_loss[loss=3.864, NarTop10Accuracy=0.5524, over 6004.96 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (3/8) Epoch 4, batch 1500, train_loss[loss=3.783, NarTop10Accuracy=0.5715, over 6213.00 frames. ], tot_loss[loss=3.864, NarTop10Accuracy=0.5523, over 5951.38 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (3/8) Epoch 4, batch 1600, train_loss[loss=3.856, NarTop10Accuracy=0.5563, over 7092.00 frames. ], tot_loss[loss=3.855, NarTop10Accuracy=0.5541, over 5936.00 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,133 INFO [trainer.py:765] (3/8) Epoch 4, batch 1700, train_loss[loss=3.684, NarTop10Accuracy=0.5752, over 6606.00 frames. ], tot_loss[loss=3.822, NarTop10Accuracy=0.5605, over 5908.85 frames. ], batch size: 14, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (3/8) Epoch 4, batch 1800, train_loss[loss=3.813, NarTop10Accuracy=0.5621, over 6975.00 frames. ], tot_loss[loss=3.826, NarTop10Accuracy=0.5599, over 5968.20 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (3/8) Epoch 4, batch 1900, train_loss[loss=3.708, NarTop10Accuracy=0.583, over 6249.00 frames. ], tot_loss[loss=3.847, NarTop10Accuracy=0.5554, over 6025.23 frames. ], batch size: 51, lr: 1.82e-02 +2024-08-06 15:14:46,672 INFO [trainer.py:765] (3/8) Epoch 4, batch 2000, train_loss[loss=3.708, NarTop10Accuracy=0.5845, over 6033.00 frames. ], tot_loss[loss=3.826, NarTop10Accuracy=0.56, over 5988.99 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,859 INFO [trainer.py:765] (3/8) Epoch 4, batch 2100, train_loss[loss=3.602, NarTop10Accuracy=0.6083, over 3864.00 frames. ], tot_loss[loss=3.813, NarTop10Accuracy=0.5628, over 5966.04 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (3/8) Epoch 4, batch 2200, train_loss[loss=3.697, NarTop10Accuracy=0.5911, over 7356.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5642, over 6007.44 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,089 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:16:03,740 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (3/8) Epoch 4, batch 2300, train_loss[loss=3.762, NarTop10Accuracy=0.581, over 5739.00 frames. ], tot_loss[loss=3.812, NarTop10Accuracy=0.5627, over 6023.78 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (3/8) Epoch 4, batch 2400, train_loss[loss=3.622, NarTop10Accuracy=0.6116, over 4968.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5687, over 5776.93 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,535 INFO [trainer.py:765] (3/8) Epoch 4, batch 2500, train_loss[loss=3.412, NarTop10Accuracy=0.6493, over 5310.00 frames. ], tot_loss[loss=3.769, NarTop10Accuracy=0.5709, over 5483.44 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,222 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:18:24,101 INFO [trainer.py:765] (3/8) Epoch 5, batch 100, train_loss[loss=3.601, NarTop10Accuracy=0.6061, over 7134.00 frames. ], tot_loss[loss=3.778, NarTop10Accuracy=0.5692, over 2371.62 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (3/8) Epoch 5, batch 200, train_loss[loss=4.051, NarTop10Accuracy=0.5079, over 6831.00 frames. ], tot_loss[loss=3.768, NarTop10Accuracy=0.5712, over 3863.67 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,888 INFO [trainer.py:765] (3/8) Epoch 5, batch 300, train_loss[loss=3.981, NarTop10Accuracy=0.5203, over 7203.00 frames. ], tot_loss[loss=3.728, NarTop10Accuracy=0.5796, over 4672.07 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (3/8) Epoch 5, batch 400, train_loss[loss=3.554, NarTop10Accuracy=0.6122, over 5139.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5807, over 5117.47 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,299 INFO [trainer.py:765] (3/8) Epoch 5, batch 500, train_loss[loss=3.851, NarTop10Accuracy=0.5498, over 6018.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5777, over 5388.25 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,711 INFO [trainer.py:765] (3/8) Epoch 5, batch 600, train_loss[loss=3.941, NarTop10Accuracy=0.5293, over 5586.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5803, over 5662.46 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (3/8) Epoch 5, batch 700, train_loss[loss=3.328, NarTop10Accuracy=0.6604, over 5028.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5806, over 5720.40 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,499 INFO [trainer.py:765] (3/8) Epoch 5, batch 800, train_loss[loss=4.127, NarTop10Accuracy=0.5028, over 5115.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5832, over 5777.23 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (3/8) Epoch 5, batch 900, train_loss[loss=3.575, NarTop10Accuracy=0.6153, over 6231.00 frames. ], tot_loss[loss=3.692, NarTop10Accuracy=0.5862, over 5793.96 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (3/8) Epoch 5, batch 1000, train_loss[loss=3.483, NarTop10Accuracy=0.634, over 6135.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5884, over 5889.89 frames. 
], batch size: 13, lr: 1.60e-02 +2024-08-06 15:24:09,572 INFO [trainer.py:765] (3/8) Epoch 5, batch 1100, train_loss[loss=3.457, NarTop10Accuracy=0.6466, over 6738.00 frames. ], tot_loss[loss=3.68, NarTop10Accuracy=0.5891, over 5929.09 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,528 INFO [trainer.py:765] (3/8) Epoch 5, batch 1200, train_loss[loss=3.478, NarTop10Accuracy=0.6265, over 7281.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5907, over 5916.28 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,380 INFO [trainer.py:765] (3/8) Epoch 5, batch 1300, train_loss[loss=3.893, NarTop10Accuracy=0.5524, over 5175.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5928, over 5992.41 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (3/8) Epoch 5, batch 1400, train_loss[loss=3.963, NarTop10Accuracy=0.5219, over 6015.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5917, over 6023.84 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (3/8) Epoch 5, batch 1500, train_loss[loss=3.681, NarTop10Accuracy=0.5965, over 5886.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5905, over 5953.97 frames. ], batch size: 50, lr: 1.58e-02 +2024-08-06 15:26:54,131 INFO [trainer.py:765] (3/8) Epoch 5, batch 1600, train_loss[loss=3.465, NarTop10Accuracy=0.6328, over 7401.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5889, over 5918.20 frames. ], batch size: 24, lr: 1.57e-02 +2024-08-06 15:27:19,604 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (3/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (3/8) Epoch 5, batch 1700, train_loss[loss=3.75, NarTop10Accuracy=0.5819, over 6255.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5912, over 5909.71 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 15:27:55,653 INFO [trainer.py:765] (3/8) Epoch 5, batch 1800, train_loss[loss=3.797, NarTop10Accuracy=0.5567, over 7170.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5929, over 5962.49 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (3/8) Epoch 5, batch 1900, train_loss[loss=3.723, NarTop10Accuracy=0.5838, over 5931.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5916, over 6008.76 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (3/8) Epoch 5, batch 2000, train_loss[loss=3.579, NarTop10Accuracy=0.6116, over 6921.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5918, over 5988.93 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (3/8) Epoch 5, batch 2100, train_loss[loss=3.358, NarTop10Accuracy=0.6604, over 3915.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5877, over 5965.24 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (3/8) Epoch 5, batch 2200, train_loss[loss=4.205, NarTop10Accuracy=0.4793, over 7188.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5918, over 6007.97 frames. 
], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (3/8) Epoch 5, batch 2300, train_loss[loss=3.504, NarTop10Accuracy=0.6246, over 5733.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5903, over 6024.17 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (3/8) Epoch 5, batch 2400, train_loss[loss=3.377, NarTop10Accuracy=0.6504, over 5673.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.596, over 5762.78 frames. ], batch size: 8, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (3/8) Epoch 5, batch 2500, train_loss[loss=3.301, NarTop10Accuracy=0.6677, over 5748.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.604, over 5470.41 frames. ], batch size: 8, lr: 1.52e-02 +2024-08-06 15:31:12,345 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (3/8) Epoch 6, batch 100, train_loss[loss=3.536, NarTop10Accuracy=0.6138, over 7356.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.597, over 2367.77 frames. ], batch size: 33, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (3/8) Epoch 6, batch 200, train_loss[loss=3.957, NarTop10Accuracy=0.529, over 6876.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.6013, over 3859.88 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,242 INFO [trainer.py:765] (3/8) Epoch 6, batch 300, train_loss[loss=3.4, NarTop10Accuracy=0.6509, over 7227.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.6029, over 4659.27 frames. ], batch size: 23, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (3/8) Epoch 6, batch 400, train_loss[loss=3.486, NarTop10Accuracy=0.6243, over 5118.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.6066, over 5117.02 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (3/8) Epoch 6, batch 500, train_loss[loss=3.304, NarTop10Accuracy=0.6703, over 6087.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6098, over 5396.10 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (3/8) Epoch 6, batch 600, train_loss[loss=3.256, NarTop10Accuracy=0.6787, over 5769.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6107, over 5653.44 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,734 INFO [trainer.py:765] (3/8) Epoch 6, batch 700, train_loss[loss=3.584, NarTop10Accuracy=0.6122, over 4965.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6095, over 5722.63 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (3/8) Epoch 6, batch 800, train_loss[loss=3.733, NarTop10Accuracy=0.5703, over 5019.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.607, over 5783.36 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (3/8) Epoch 6, batch 900, train_loss[loss=3.859, NarTop10Accuracy=0.5417, over 6630.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6089, over 5801.40 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (3/8) Epoch 6, batch 1000, train_loss[loss=3.258, NarTop10Accuracy=0.6759, over 6102.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.6059, over 5892.71 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (3/8) Epoch 6, batch 1100, train_loss[loss=3.389, NarTop10Accuracy=0.6497, over 6789.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.6066, over 5935.83 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,828 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (3/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:38:04,965 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (3/8) Epoch 6, batch 1200, train_loss[loss=3.366, NarTop10Accuracy=0.6539, over 7284.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6092, over 5938.35 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (3/8) Epoch 6, batch 1300, train_loss[loss=3.504, NarTop10Accuracy=0.6337, over 4341.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6094, over 5986.79 frames. ], batch size: 5, lr: 1.37e-02 +2024-08-06 15:39:44,069 INFO [trainer.py:765] (3/8) Epoch 6, batch 1400, train_loss[loss=3.375, NarTop10Accuracy=0.6461, over 6165.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6102, over 6012.38 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (3/8) Epoch 6, batch 1500, train_loss[loss=3.958, NarTop10Accuracy=0.5224, over 6429.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6102, over 5961.06 frames. ], batch size: 50, lr: 1.36e-02 +2024-08-06 15:40:43,105 INFO [trainer.py:765] (3/8) Epoch 6, batch 1600, train_loss[loss=3.437, NarTop10Accuracy=0.6469, over 7074.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6115, over 5930.66 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,788 INFO [trainer.py:765] (3/8) Epoch 6, batch 1700, train_loss[loss=3.457, NarTop10Accuracy=0.6316, over 6552.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6151, over 5918.90 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 15:41:36,316 INFO [trainer.py:765] (3/8) Epoch 6, batch 1800, train_loss[loss=3.519, NarTop10Accuracy=0.632, over 7005.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6134, over 5993.80 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (3/8) Epoch 6, batch 1900, train_loss[loss=3.82, NarTop10Accuracy=0.5688, over 6675.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6086, over 6031.57 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:28,318 INFO [trainer.py:765] (3/8) Epoch 6, batch 2000, train_loss[loss=3.498, NarTop10Accuracy=0.6231, over 6408.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6104, over 6010.00 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,668 INFO [trainer.py:765] (3/8) Epoch 6, batch 2100, train_loss[loss=3.3, NarTop10Accuracy=0.663, over 3879.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6121, over 5977.69 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (3/8) Epoch 6, batch 2200, train_loss[loss=3.829, NarTop10Accuracy=0.5523, over 7341.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6118, over 6001.22 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,105 INFO [trainer.py:765] (3/8) Epoch 6, batch 2300, train_loss[loss=3.284, NarTop10Accuracy=0.6781, over 5808.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6108, over 6010.58 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (3/8) Epoch 6, batch 2400, train_loss[loss=3.299, NarTop10Accuracy=0.674, over 5241.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6177, over 5773.98 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (3/8) Epoch 6, batch 2500, train_loss[loss=3.434, NarTop10Accuracy=0.6409, over 4986.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6203, over 5480.36 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,423 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:45:58,042 INFO [trainer.py:765] (3/8) Epoch 7, batch 100, train_loss[loss=3.315, NarTop10Accuracy=0.6608, over 7431.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6154, over 2365.13 frames. ], batch size: 32, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (3/8) Epoch 7, batch 200, train_loss[loss=3.407, NarTop10Accuracy=0.6404, over 6828.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6198, over 3850.47 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (3/8) Epoch 7, batch 300, train_loss[loss=3.771, NarTop10Accuracy=0.5812, over 7197.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6181, over 4645.37 frames. ], batch size: 23, lr: 1.23e-02 +2024-08-06 15:47:34,495 INFO [trainer.py:765] (3/8) Epoch 7, batch 400, train_loss[loss=3.483, NarTop10Accuracy=0.6295, over 5718.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6196, over 5108.34 frames. ], batch size: 8, lr: 1.23e-02 +2024-08-06 15:48:13,730 INFO [trainer.py:765] (3/8) Epoch 7, batch 500, train_loss[loss=3.72, NarTop10Accuracy=0.5827, over 6204.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6207, over 5405.49 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,368 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,720 INFO [trainer.py:765] (3/8) Epoch 7, batch 600, train_loss[loss=3.208, NarTop10Accuracy=0.6904, over 6204.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6187, over 5656.61 frames. ], batch size: 10, lr: 1.22e-02 +2024-08-06 15:49:24,912 INFO [trainer.py:765] (3/8) Epoch 7, batch 700, train_loss[loss=3.741, NarTop10Accuracy=0.5769, over 5136.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6208, over 5728.78 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,381 INFO [trainer.py:765] (3/8) Epoch 7, batch 800, train_loss[loss=3.253, NarTop10Accuracy=0.6882, over 5154.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6246, over 5766.34 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,548 INFO [trainer.py:765] (3/8) Epoch 7, batch 900, train_loss[loss=3.365, NarTop10Accuracy=0.6578, over 6237.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.626, over 5777.16 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,155 INFO [trainer.py:765] (3/8) Epoch 7, batch 1000, train_loss[loss=3.307, NarTop10Accuracy=0.6665, over 6249.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6275, over 5886.78 frames. 
], batch size: 13, lr: 1.20e-02 +2024-08-06 15:51:51,758 INFO [trainer.py:765] (3/8) Epoch 7, batch 1100, train_loss[loss=3.299, NarTop10Accuracy=0.6701, over 6684.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6267, over 5936.77 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,699 INFO [trainer.py:765] (3/8) Epoch 7, batch 1200, train_loss[loss=3.256, NarTop10Accuracy=0.6797, over 7569.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6277, over 5937.92 frames. ], batch size: 32, lr: 1.20e-02 +2024-08-06 15:52:52,007 INFO [trainer.py:765] (3/8) Epoch 7, batch 1300, train_loss[loss=3.508, NarTop10Accuracy=0.6224, over 4986.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.627, over 5979.24 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,842 INFO [trainer.py:765] (3/8) Epoch 7, batch 1400, train_loss[loss=3.276, NarTop10Accuracy=0.6695, over 6024.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6262, over 6014.61 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (3/8) Epoch 7, batch 1500, train_loss[loss=3.771, NarTop10Accuracy=0.575, over 6138.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6306, over 5947.98 frames. ], batch size: 51, lr: 1.19e-02 +2024-08-06 15:54:32,385 INFO [trainer.py:765] (3/8) Epoch 7, batch 1600, train_loss[loss=3.688, NarTop10Accuracy=0.5857, over 7206.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6299, over 5944.81 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,054 INFO [trainer.py:765] (3/8) Epoch 7, batch 1700, train_loss[loss=3.606, NarTop10Accuracy=0.6024, over 6666.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6266, over 5920.56 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 15:55:25,512 INFO [trainer.py:765] (3/8) Epoch 7, batch 1800, train_loss[loss=3.79, NarTop10Accuracy=0.5572, over 6972.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6273, over 5974.65 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,082 INFO [trainer.py:765] (3/8) Epoch 7, batch 1900, train_loss[loss=3.301, NarTop10Accuracy=0.6757, over 5790.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6242, over 6028.08 frames. ], batch size: 51, lr: 1.18e-02 +2024-08-06 15:56:17,591 INFO [trainer.py:765] (3/8) Epoch 7, batch 2000, train_loss[loss=3.768, NarTop10Accuracy=0.5735, over 5889.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.625, over 6002.24 frames. ], batch size: 51, lr: 1.17e-02 +2024-08-06 15:56:42,856 INFO [trainer.py:765] (3/8) Epoch 7, batch 2100, train_loss[loss=3.534, NarTop10Accuracy=0.6019, over 3990.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6279, over 5986.06 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,079 INFO [trainer.py:765] (3/8) Epoch 7, batch 2200, train_loss[loss=3.468, NarTop10Accuracy=0.6323, over 7428.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6239, over 6022.60 frames. ], batch size: 32, lr: 1.17e-02 +2024-08-06 15:57:33,178 INFO [trainer.py:765] (3/8) Epoch 7, batch 2300, train_loss[loss=3.286, NarTop10Accuracy=0.6741, over 5652.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6226, over 6024.86 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,619 INFO [trainer.py:765] (3/8) Epoch 7, batch 2400, train_loss[loss=3.182, NarTop10Accuracy=0.6873, over 5676.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6252, over 5777.56 frames. 
], batch size: 8, lr: 1.16e-02 +2024-08-06 15:58:21,088 INFO [trainer.py:765] (3/8) Epoch 7, batch 2500, train_loss[loss=3.558, NarTop10Accuracy=0.6079, over 5214.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6307, over 5477.75 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,565 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 15:58:40,220 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:48,981 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:59:52,877 INFO [trainer.py:765] (3/8) Epoch 8, batch 100, train_loss[loss=3.567, NarTop10Accuracy=0.6137, over 7770.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.635, over 2363.54 frames. ], batch size: 33, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (3/8) Epoch 8, batch 200, train_loss[loss=3.252, NarTop10Accuracy=0.6774, over 6963.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6299, over 3851.65 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,563 INFO [trainer.py:765] (3/8) Epoch 8, batch 300, train_loss[loss=3.289, NarTop10Accuracy=0.6712, over 7062.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6302, over 4670.11 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,760 INFO [trainer.py:765] (3/8) Epoch 8, batch 400, train_loss[loss=3.743, NarTop10Accuracy=0.5739, over 5013.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.63, over 5107.33 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,066 INFO [trainer.py:765] (3/8) Epoch 8, batch 500, train_loss[loss=3.648, NarTop10Accuracy=0.5928, over 6147.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6345, over 5402.94 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (3/8) Epoch 8, batch 600, train_loss[loss=3.08, NarTop10Accuracy=0.7081, over 5673.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6316, over 5655.59 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,501 INFO [trainer.py:765] (3/8) Epoch 8, batch 700, train_loss[loss=3.725, NarTop10Accuracy=0.576, over 5100.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6308, over 5726.17 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (3/8) Epoch 8, batch 800, train_loss[loss=3.509, NarTop10Accuracy=0.6257, over 5061.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6326, over 5779.25 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:04:27,588 INFO [trainer.py:765] (3/8) Epoch 8, batch 900, train_loss[loss=3.082, NarTop10Accuracy=0.7136, over 6156.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6368, over 5796.42 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (3/8) Epoch 8, batch 1000, train_loss[loss=3.608, NarTop10Accuracy=0.6034, over 6129.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6373, over 5914.58 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:05:37,294 INFO [trainer.py:765] (3/8) Epoch 8, batch 1100, train_loss[loss=3.664, NarTop10Accuracy=0.5854, over 6783.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6369, over 5934.10 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,859 INFO [trainer.py:765] (3/8) Epoch 8, batch 1200, train_loss[loss=3.432, NarTop10Accuracy=0.6429, over 7140.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6349, over 5924.61 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (3/8) Epoch 8, batch 1300, train_loss[loss=3.126, NarTop10Accuracy=0.7107, over 4215.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6391, over 5981.40 frames. ], batch size: 5, lr: 1.06e-02 +2024-08-06 16:07:24,236 INFO [trainer.py:765] (3/8) Epoch 8, batch 1400, train_loss[loss=3.508, NarTop10Accuracy=0.6232, over 5988.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6391, over 6015.17 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,169 INFO [trainer.py:765] (3/8) Epoch 8, batch 1500, train_loss[loss=3.42, NarTop10Accuracy=0.6447, over 6381.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.642, over 5951.09 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (3/8) Epoch 8, batch 1600, train_loss[loss=3.144, NarTop10Accuracy=0.7015, over 7098.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.642, over 5932.41 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,618 INFO [trainer.py:765] (3/8) Epoch 8, batch 1700, train_loss[loss=3.358, NarTop10Accuracy=0.6517, over 6687.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6412, over 5908.61 frames. ], batch size: 14, lr: 1.05e-02 +2024-08-06 16:09:13,106 INFO [trainer.py:765] (3/8) Epoch 8, batch 1800, train_loss[loss=3.264, NarTop10Accuracy=0.6768, over 7104.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6423, over 5971.38 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,636 INFO [trainer.py:765] (3/8) Epoch 8, batch 1900, train_loss[loss=3.744, NarTop10Accuracy=0.5803, over 6087.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6436, over 6021.73 frames. ], batch size: 51, lr: 1.04e-02 +2024-08-06 16:09:56,940 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (3/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 16:10:05,470 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,204 INFO [trainer.py:765] (3/8) Epoch 8, batch 2000, train_loss[loss=3.948, NarTop10Accuracy=0.526, over 6420.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6413, over 5999.04 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,514 INFO [trainer.py:765] (3/8) Epoch 8, batch 2100, train_loss[loss=3.545, NarTop10Accuracy=0.6193, over 3855.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6429, over 5991.74 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,747 INFO [trainer.py:765] (3/8) Epoch 8, batch 2200, train_loss[loss=3.536, NarTop10Accuracy=0.619, over 7182.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6405, over 6017.82 frames. ], batch size: 32, lr: 1.04e-02 +2024-08-06 16:11:28,904 INFO [trainer.py:765] (3/8) Epoch 8, batch 2300, train_loss[loss=3.782, NarTop10Accuracy=0.5616, over 5787.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6364, over 6026.79 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,093 INFO [trainer.py:765] (3/8) Epoch 8, batch 2400, train_loss[loss=3.423, NarTop10Accuracy=0.6344, over 5136.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6397, over 5782.30 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,444 INFO [trainer.py:765] (3/8) Epoch 8, batch 2500, train_loss[loss=3.182, NarTop10Accuracy=0.6926, over 5040.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.643, over 5474.87 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,672 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 16:13:37,514 INFO [trainer.py:765] (3/8) Epoch 9, batch 100, train_loss[loss=3.294, NarTop10Accuracy=0.6718, over 7149.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6546, over 2389.95 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,440 INFO [trainer.py:765] (3/8) Epoch 9, batch 200, train_loss[loss=3.656, NarTop10Accuracy=0.5967, over 6867.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.653, over 3883.63 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,507 INFO [trainer.py:765] (3/8) Epoch 9, batch 300, train_loss[loss=3.353, NarTop10Accuracy=0.6584, over 7422.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.65, over 4668.78 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,914 INFO [trainer.py:765] (3/8) Epoch 9, batch 400, train_loss[loss=3.129, NarTop10Accuracy=0.7043, over 5718.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.653, over 5112.91 frames. ], batch size: 8, lr: 9.65e-03 +2024-08-06 16:15:50,336 INFO [trainer.py:765] (3/8) Epoch 9, batch 500, train_loss[loss=3.215, NarTop10Accuracy=0.6947, over 6189.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6559, over 5397.23 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,972 INFO [trainer.py:765] (3/8) Epoch 9, batch 600, train_loss[loss=3.623, NarTop10Accuracy=0.6005, over 5637.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.658, over 5664.19 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,145 INFO [trainer.py:765] (3/8) Epoch 9, batch 700, train_loss[loss=3.085, NarTop10Accuracy=0.705, over 5130.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6549, over 5731.10 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,052 INFO [trainer.py:765] (3/8) Epoch 9, batch 800, train_loss[loss=3.229, NarTop10Accuracy=0.6825, over 4290.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.648, over 5792.51 frames. ], batch size: 5, lr: 9.57e-03 +2024-08-06 16:18:07,815 INFO [trainer.py:765] (3/8) Epoch 9, batch 900, train_loss[loss=3.078, NarTop10Accuracy=0.7175, over 6189.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6491, over 5797.15 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,344 INFO [trainer.py:765] (3/8) Epoch 9, batch 1000, train_loss[loss=3.13, NarTop10Accuracy=0.6868, over 6264.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6462, over 5890.54 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,382 INFO [trainer.py:765] (3/8) Epoch 9, batch 1100, train_loss[loss=3.408, NarTop10Accuracy=0.6392, over 6876.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6456, over 5922.72 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,877 INFO [trainer.py:765] (3/8) Epoch 9, batch 1200, train_loss[loss=3.882, NarTop10Accuracy=0.5438, over 7029.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6448, over 5918.43 frames. 
], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,906 INFO [trainer.py:765] (3/8) Epoch 9, batch 1300, train_loss[loss=3.192, NarTop10Accuracy=0.6912, over 5226.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6458, over 5989.98 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,578 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (3/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,690 INFO [trainer.py:765] (3/8) Epoch 9, batch 1400, train_loss[loss=3.451, NarTop10Accuracy=0.6322, over 6039.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6441, over 6021.73 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,895 INFO [trainer.py:765] (3/8) Epoch 9, batch 1500, train_loss[loss=3.439, NarTop10Accuracy=0.6385, over 6000.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6486, over 5961.47 frames. ], batch size: 51, lr: 9.42e-03 +2024-08-06 16:22:06,720 INFO [trainer.py:765] (3/8) Epoch 9, batch 1600, train_loss[loss=3.342, NarTop10Accuracy=0.6617, over 7095.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6509, over 5951.18 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (3/8) Epoch 9, batch 1700, train_loss[loss=3.529, NarTop10Accuracy=0.6182, over 6498.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6477, over 5929.96 frames. ], batch size: 14, lr: 9.38e-03 +2024-08-06 16:23:00,062 INFO [trainer.py:765] (3/8) Epoch 9, batch 1800, train_loss[loss=3.309, NarTop10Accuracy=0.6747, over 7266.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6493, over 5986.67 frames. ], batch size: 23, lr: 9.36e-03 +2024-08-06 16:23:26,783 INFO [trainer.py:765] (3/8) Epoch 9, batch 1900, train_loss[loss=3.439, NarTop10Accuracy=0.6415, over 6267.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6472, over 6024.04 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,485 INFO [trainer.py:765] (3/8) Epoch 9, batch 2000, train_loss[loss=3.87, NarTop10Accuracy=0.5464, over 6993.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6487, over 6001.50 frames. ], batch size: 52, lr: 9.32e-03 +2024-08-06 16:24:17,962 INFO [trainer.py:765] (3/8) Epoch 9, batch 2100, train_loss[loss=3.228, NarTop10Accuracy=0.6802, over 4887.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6485, over 5986.43 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (3/8) Epoch 9, batch 2200, train_loss[loss=3.689, NarTop10Accuracy=0.5827, over 7605.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.647, over 6020.48 frames. ], batch size: 32, lr: 9.28e-03 +2024-08-06 16:25:08,721 INFO [trainer.py:765] (3/8) Epoch 9, batch 2300, train_loss[loss=3.202, NarTop10Accuracy=0.6892, over 5685.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6439, over 6032.70 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,162 INFO [trainer.py:765] (3/8) Epoch 9, batch 2400, train_loss[loss=3.292, NarTop10Accuracy=0.667, over 4974.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6441, over 5765.77 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,767 INFO [trainer.py:765] (3/8) Epoch 9, batch 2500, train_loss[loss=3.39, NarTop10Accuracy=0.6469, over 5277.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6498, over 5465.34 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,370 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 16:27:19,584 INFO [trainer.py:765] (3/8) Epoch 10, batch 100, train_loss[loss=3.288, NarTop10Accuracy=0.6686, over 7152.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6508, over 2349.70 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,628 INFO [trainer.py:765] (3/8) Epoch 10, batch 200, train_loss[loss=3.005, NarTop10Accuracy=0.7301, over 6789.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6537, over 3846.63 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (3/8) Epoch 10, batch 300, train_loss[loss=3.058, NarTop10Accuracy=0.7225, over 7008.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.655, over 4662.18 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,200 INFO [trainer.py:765] (3/8) Epoch 10, batch 400, train_loss[loss=3.357, NarTop10Accuracy=0.6599, over 5091.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6573, over 5118.78 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,218 INFO [trainer.py:765] (3/8) Epoch 10, batch 500, train_loss[loss=3.063, NarTop10Accuracy=0.7125, over 6165.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6581, over 5390.58 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (3/8) Epoch 10, batch 600, train_loss[loss=3.334, NarTop10Accuracy=0.6556, over 5745.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6574, over 5635.93 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,265 INFO [trainer.py:765] (3/8) Epoch 10, batch 700, train_loss[loss=3.246, NarTop10Accuracy=0.6632, over 5094.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6564, over 5721.05 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,843 INFO [trainer.py:765] (3/8) Epoch 10, batch 800, train_loss[loss=3.632, NarTop10Accuracy=0.5945, over 4368.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6548, over 5770.70 frames. ], batch size: 5, lr: 8.64e-03 +2024-08-06 16:31:16,258 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (3/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (3/8) Epoch 10, batch 900, train_loss[loss=3.264, NarTop10Accuracy=0.6671, over 6804.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6598, over 5810.36 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,588 INFO [trainer.py:765] (3/8) Epoch 10, batch 1000, train_loss[loss=3.186, NarTop10Accuracy=0.6922, over 6147.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6584, over 5905.94 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (3/8) Epoch 10, batch 1100, train_loss[loss=3.144, NarTop10Accuracy=0.7023, over 6738.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.656, over 5930.28 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (3/8) Epoch 10, batch 1200, train_loss[loss=3.231, NarTop10Accuracy=0.6823, over 7398.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6584, over 5931.91 frames. ], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,169 INFO [trainer.py:765] (3/8) Epoch 10, batch 1300, train_loss[loss=3.369, NarTop10Accuracy=0.6495, over 5019.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6584, over 5998.08 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,200 INFO [trainer.py:765] (3/8) Epoch 10, batch 1400, train_loss[loss=3.33, NarTop10Accuracy=0.6541, over 6105.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6531, over 6005.02 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (3/8) Epoch 10, batch 1500, train_loss[loss=3.588, NarTop10Accuracy=0.6113, over 5946.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6575, over 5936.48 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (3/8) Epoch 10, batch 1600, train_loss[loss=3.679, NarTop10Accuracy=0.5875, over 6738.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6597, over 5927.21 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (3/8) Epoch 10, batch 1700, train_loss[loss=3.404, NarTop10Accuracy=0.6423, over 6573.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6573, over 5933.10 frames. ], batch size: 14, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (3/8) Epoch 10, batch 1800, train_loss[loss=3.232, NarTop10Accuracy=0.6694, over 7050.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6594, over 6004.90 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (3/8) Epoch 10, batch 1900, train_loss[loss=3.289, NarTop10Accuracy=0.6802, over 6636.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6589, over 6055.32 frames. ], batch size: 51, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (3/8) Epoch 10, batch 2000, train_loss[loss=3.304, NarTop10Accuracy=0.6765, over 6183.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6606, over 6019.86 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (3/8) Epoch 10, batch 2100, train_loss[loss=3.473, NarTop10Accuracy=0.6356, over 3828.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6575, over 5977.12 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (3/8) Epoch 10, batch 2200, train_loss[loss=3.809, NarTop10Accuracy=0.5423, over 7014.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6564, over 6023.36 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (3/8) Epoch 10, batch 2300, train_loss[loss=3.079, NarTop10Accuracy=0.7077, over 5775.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6559, over 6042.92 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,005 INFO [trainer.py:765] (3/8) Epoch 10, batch 2400, train_loss[loss=3.116, NarTop10Accuracy=0.7013, over 5094.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6613, over 5789.73 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (3/8) Epoch 10, batch 2500, train_loss[loss=3.724, NarTop10Accuracy=0.5749, over 5106.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6662, over 5505.50 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,807 INFO [trainer.py:650] (3/8) Reaches end of dataloader. 
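Note (an editorial aid, not part of the original log files): the per-batch entries above all share one fixed shape, so they can be scraped programmatically rather than read inline when one wants the loss/accuracy/learning-rate curves. The sketch below is a hypothetical helper assuming Python 3 and a log file laid out exactly like these entries; the names BATCH_RE and parse_batch_lines are illustrative and do not exist in the repository.

import re

# Hypothetical helper, not part of trainer.py: pull (epoch, batch, losses,
# accuracies, lr) out of per-batch log entries shaped like
#   "Epoch 9, batch 1300, train_loss[loss=3.192, NarTop10Accuracy=0.6912, over 5226.00 frames. ],
#    tot_loss[loss=3.399, NarTop10Accuracy=0.6458, over 5989.98 frames. ], batch size: 6, lr: 9.46e-03"
BATCH_RE = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), NarTop10Accuracy=(?P<train_acc>[\d.]+).*?\], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), NarTop10Accuracy=(?P<tot_acc>[\d.]+).*?\].*?"
    r"lr: (?P<lr>[\d.]+e[+-]\d+)"
)

def parse_batch_lines(path):
    """Yield one dict per 'Epoch N, batch M' entry found in the log file at `path`."""
    with open(path) as f:
        for line in f:
            for m in BATCH_RE.finditer(line):
                rec = m.groupdict()
                yield {
                    "epoch": int(rec["epoch"]),
                    "batch": int(rec["batch"]),
                    "train_loss": float(rec["train_loss"]),
                    "train_acc": float(rec["train_acc"]),
                    "tot_loss": float(rec["tot_loss"]),
                    "tot_acc": float(rec["tot_acc"]),
                    "lr": float(rec["lr"]),
                }

Feeding the yielded records into a plotting library (not shown) gives the tot_loss, NarTop10Accuracy, and lr trajectories directly, which is the main reason one would parse these files at all.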
+2024-08-06 16:41:06,235 INFO [trainer.py:765] (3/8) Epoch 11, batch 100, train_loss[loss=3.664, NarTop10Accuracy=0.5944, over 7383.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6562, over 2358.08 frames. ], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,021 INFO [trainer.py:765] (3/8) Epoch 11, batch 200, train_loss[loss=3.59, NarTop10Accuracy=0.6029, over 6864.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6601, over 3866.81 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,191 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,976 INFO [trainer.py:765] (3/8) Epoch 11, batch 300, train_loss[loss=3.076, NarTop10Accuracy=0.7129, over 7023.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6664, over 4652.14 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,155 INFO [trainer.py:765] (3/8) Epoch 11, batch 400, train_loss[loss=3.334, NarTop10Accuracy=0.6619, over 5196.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6684, over 5096.07 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,719 INFO [trainer.py:765] (3/8) Epoch 11, batch 500, train_loss[loss=3.017, NarTop10Accuracy=0.7247, over 6171.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6694, over 5383.79 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,241 INFO [trainer.py:765] (3/8) Epoch 11, batch 600, train_loss[loss=3.476, NarTop10Accuracy=0.6294, over 5679.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6669, over 5648.76 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (3/8) Epoch 11, batch 700, train_loss[loss=3.562, NarTop10Accuracy=0.6113, over 4305.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6683, over 5728.43 frames. ], batch size: 5, lr: 7.88e-03 +2024-08-06 16:45:10,468 INFO [trainer.py:765] (3/8) Epoch 11, batch 800, train_loss[loss=3.067, NarTop10Accuracy=0.7145, over 4260.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6641, over 5783.97 frames. ], batch size: 5, lr: 7.86e-03 +2024-08-06 16:45:46,458 INFO [trainer.py:765] (3/8) Epoch 11, batch 900, train_loss[loss=3.758, NarTop10Accuracy=0.5715, over 6693.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6637, over 5806.25 frames. ], batch size: 14, lr: 7.85e-03 +2024-08-06 16:46:20,311 INFO [trainer.py:765] (3/8) Epoch 11, batch 1000, train_loss[loss=3.342, NarTop10Accuracy=0.6481, over 6624.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6638, over 5901.06 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 16:46:53,457 INFO [trainer.py:765] (3/8) Epoch 11, batch 1100, train_loss[loss=2.962, NarTop10Accuracy=0.7335, over 6786.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6653, over 5961.03 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (3/8) Epoch 11, batch 1200, train_loss[loss=3.477, NarTop10Accuracy=0.6288, over 7095.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6641, over 5955.19 frames. 
], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,482 INFO [trainer.py:765] (3/8) Epoch 11, batch 1300, train_loss[loss=2.947, NarTop10Accuracy=0.7535, over 5058.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6637, over 6003.43 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,355 INFO [trainer.py:765] (3/8) Epoch 11, batch 1400, train_loss[loss=3.523, NarTop10Accuracy=0.6217, over 6012.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.66, over 6012.38 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,345 INFO [trainer.py:765] (3/8) Epoch 11, batch 1500, train_loss[loss=3.302, NarTop10Accuracy=0.6732, over 5823.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6587, over 5955.35 frames. ], batch size: 51, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (3/8) Epoch 11, batch 1600, train_loss[loss=3.21, NarTop10Accuracy=0.6852, over 7062.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6627, over 5937.41 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,792 INFO [trainer.py:765] (3/8) Epoch 11, batch 1700, train_loss[loss=3.441, NarTop10Accuracy=0.635, over 6600.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6648, over 5902.46 frames. ], batch size: 14, lr: 7.74e-03 +2024-08-06 16:50:30,354 INFO [trainer.py:765] (3/8) Epoch 11, batch 1800, train_loss[loss=3.532, NarTop10Accuracy=0.6197, over 6996.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6609, over 5984.99 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,822 INFO [trainer.py:765] (3/8) Epoch 11, batch 1900, train_loss[loss=3.781, NarTop10Accuracy=0.564, over 5646.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6599, over 6015.26 frames. ], batch size: 50, lr: 7.71e-03 +2024-08-06 16:51:22,405 INFO [trainer.py:765] (3/8) Epoch 11, batch 2000, train_loss[loss=3.86, NarTop10Accuracy=0.5464, over 6060.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.663, over 5998.13 frames. ], batch size: 50, lr: 7.70e-03 +2024-08-06 16:51:47,794 INFO [trainer.py:765] (3/8) Epoch 11, batch 2100, train_loss[loss=3.078, NarTop10Accuracy=0.7182, over 4812.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6641, over 5984.94 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (3/8) Epoch 11, batch 2200, train_loss[loss=3.344, NarTop10Accuracy=0.6573, over 7371.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6642, over 6017.26 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,899 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,080 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,444 INFO [trainer.py:765] (3/8) Epoch 11, batch 2300, train_loss[loss=3.14, NarTop10Accuracy=0.702, over 5553.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6627, over 6013.65 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (3/8) Epoch 11, batch 2400, train_loss[loss=3.594, NarTop10Accuracy=0.6113, over 5754.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6643, over 5782.15 frames. 
], batch size: 8, lr: 7.64e-03 +2024-08-06 16:53:34,371 INFO [trainer.py:765] (3/8) Epoch 11, batch 2500, train_loss[loss=3.523, NarTop10Accuracy=0.616, over 5169.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.666, over 5483.88 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,339 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 16:54:58,525 INFO [trainer.py:765] (3/8) Epoch 12, batch 100, train_loss[loss=3.708, NarTop10Accuracy=0.5796, over 6987.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6649, over 2363.90 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,432 INFO [trainer.py:765] (3/8) Epoch 12, batch 200, train_loss[loss=3.225, NarTop10Accuracy=0.6877, over 6783.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6704, over 3852.95 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,096 INFO [trainer.py:765] (3/8) Epoch 12, batch 300, train_loss[loss=3.034, NarTop10Accuracy=0.7167, over 7185.00 frames. ], tot_loss[loss=3.249, NarTop10Accuracy=0.6765, over 4648.02 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,426 INFO [trainer.py:765] (3/8) Epoch 12, batch 400, train_loss[loss=3.062, NarTop10Accuracy=0.716, over 5103.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6743, over 5121.65 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,503 INFO [trainer.py:765] (3/8) Epoch 12, batch 500, train_loss[loss=3.548, NarTop10Accuracy=0.6021, over 6144.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6728, over 5385.65 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,483 INFO [trainer.py:765] (3/8) Epoch 12, batch 600, train_loss[loss=2.931, NarTop10Accuracy=0.738, over 5598.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6732, over 5655.20 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,005 INFO [trainer.py:765] (3/8) Epoch 12, batch 700, train_loss[loss=3.682, NarTop10Accuracy=0.5821, over 4365.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6695, over 5707.25 frames. ], batch size: 5, lr: 7.22e-03 +2024-08-06 16:58:53,469 INFO [trainer.py:765] (3/8) Epoch 12, batch 800, train_loss[loss=3.279, NarTop10Accuracy=0.6683, over 5058.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6678, over 5768.66 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,206 INFO [trainer.py:765] (3/8) Epoch 12, batch 900, train_loss[loss=3.068, NarTop10Accuracy=0.7167, over 6129.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6714, over 5801.80 frames. ], batch size: 13, lr: 7.20e-03 +2024-08-06 17:00:01,574 INFO [trainer.py:765] (3/8) Epoch 12, batch 1000, train_loss[loss=3.075, NarTop10Accuracy=0.7164, over 6243.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.668, over 5912.82 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 17:00:39,189 INFO [trainer.py:765] (3/8) Epoch 12, batch 1100, train_loss[loss=3.626, NarTop10Accuracy=0.5937, over 6777.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6647, over 5933.28 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,963 INFO [trainer.py:765] (3/8) Epoch 12, batch 1200, train_loss[loss=3.017, NarTop10Accuracy=0.7267, over 7407.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6724, over 5921.54 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,107 INFO [trainer.py:765] (3/8) Epoch 12, batch 1300, train_loss[loss=3.198, NarTop10Accuracy=0.6873, over 5073.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6705, over 5993.79 frames. 
], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,323 INFO [trainer.py:765] (3/8) Epoch 12, batch 1400, train_loss[loss=3.591, NarTop10Accuracy=0.6051, over 6042.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6673, over 6014.00 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (3/8) Epoch 12, batch 1500, train_loss[loss=3.438, NarTop10Accuracy=0.6484, over 5703.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6716, over 5953.85 frames. ], batch size: 51, lr: 7.13e-03 +2024-08-06 17:03:20,691 INFO [trainer.py:765] (3/8) Epoch 12, batch 1600, train_loss[loss=3.311, NarTop10Accuracy=0.659, over 7089.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6686, over 5929.92 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,296 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:03:46,473 INFO [trainer.py:811] (3/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:03:46,987 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,601 INFO [trainer.py:765] (3/8) Epoch 12, batch 1700, train_loss[loss=3.426, NarTop10Accuracy=0.6359, over 6279.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.668, over 5914.92 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,119 INFO [trainer.py:765] (3/8) Epoch 12, batch 1800, train_loss[loss=3.61, NarTop10Accuracy=0.5964, over 7059.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6679, over 5970.26 frames. ], batch size: 22, lr: 7.10e-03 +2024-08-06 17:04:48,589 INFO [trainer.py:765] (3/8) Epoch 12, batch 1900, train_loss[loss=3.306, NarTop10Accuracy=0.6728, over 5979.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6697, over 6013.03 frames. ], batch size: 50, lr: 7.08e-03 +2024-08-06 17:05:14,196 INFO [trainer.py:765] (3/8) Epoch 12, batch 2000, train_loss[loss=3.531, NarTop10Accuracy=0.6126, over 6291.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6718, over 6000.16 frames. ], batch size: 51, lr: 7.07e-03 +2024-08-06 17:05:39,466 INFO [trainer.py:765] (3/8) Epoch 12, batch 2100, train_loss[loss=3.351, NarTop10Accuracy=0.654, over 3981.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.671, over 5975.26 frames. ], batch size: 4, lr: 7.06e-03 +2024-08-06 17:06:04,689 INFO [trainer.py:765] (3/8) Epoch 12, batch 2200, train_loss[loss=3.38, NarTop10Accuracy=0.6464, over 7383.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6676, over 6002.03 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,845 INFO [trainer.py:765] (3/8) Epoch 12, batch 2300, train_loss[loss=3.463, NarTop10Accuracy=0.625, over 5697.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.669, over 6018.97 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,199 INFO [trainer.py:765] (3/8) Epoch 12, batch 2400, train_loss[loss=3.189, NarTop10Accuracy=0.6991, over 5244.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6705, over 5776.49 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,645 INFO [trainer.py:765] (3/8) Epoch 12, batch 2500, train_loss[loss=3.243, NarTop10Accuracy=0.6695, over 5091.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6734, over 5475.16 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,619 INFO [trainer.py:650] (3/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,078 INFO [trainer.py:765] (3/8) Epoch 13, batch 100, train_loss[loss=3.175, NarTop10Accuracy=0.6911, over 7416.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6677, over 2368.03 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,119 INFO [trainer.py:765] (3/8) Epoch 13, batch 200, train_loss[loss=3.009, NarTop10Accuracy=0.7228, over 6798.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6702, over 3850.44 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,276 INFO [trainer.py:765] (3/8) Epoch 13, batch 300, train_loss[loss=3.53, NarTop10Accuracy=0.6159, over 7383.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6747, over 4667.72 frames. ], batch size: 23, lr: 6.71e-03 +2024-08-06 17:10:19,163 INFO [trainer.py:765] (3/8) Epoch 13, batch 400, train_loss[loss=2.987, NarTop10Accuracy=0.7314, over 5031.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6772, over 5128.25 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,335 INFO [trainer.py:765] (3/8) Epoch 13, batch 500, train_loss[loss=3.146, NarTop10Accuracy=0.6928, over 6054.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6779, over 5391.57 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,244 INFO [trainer.py:765] (3/8) Epoch 13, batch 600, train_loss[loss=3.091, NarTop10Accuracy=0.7093, over 5799.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6797, over 5647.50 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (3/8) Epoch 13, batch 700, train_loss[loss=3.078, NarTop10Accuracy=0.7094, over 5073.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6784, over 5725.00 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,441 INFO [trainer.py:765] (3/8) Epoch 13, batch 800, train_loss[loss=2.913, NarTop10Accuracy=0.7432, over 5124.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6771, over 5792.55 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 17:13:10,032 INFO [trainer.py:765] (3/8) Epoch 13, batch 900, train_loss[loss=3.291, NarTop10Accuracy=0.6669, over 6174.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6779, over 5802.45 frames. ], batch size: 13, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (3/8) Epoch 13, batch 1000, train_loss[loss=3.739, NarTop10Accuracy=0.5865, over 6273.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6771, over 5900.22 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 17:14:15,536 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (3/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:14:24,471 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,697 INFO [trainer.py:765] (3/8) Epoch 13, batch 1100, train_loss[loss=3.439, NarTop10Accuracy=0.6339, over 6831.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6757, over 5936.46 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,476 INFO [trainer.py:765] (3/8) Epoch 13, batch 1200, train_loss[loss=3.398, NarTop10Accuracy=0.6408, over 7143.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6747, over 5920.43 frames. 
], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (3/8) Epoch 13, batch 1300, train_loss[loss=2.953, NarTop10Accuracy=0.7445, over 5139.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6734, over 5991.28 frames. ], batch size: 6, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (3/8) Epoch 13, batch 1400, train_loss[loss=3.07, NarTop10Accuracy=0.7146, over 6153.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6736, over 6001.29 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,788 INFO [trainer.py:765] (3/8) Epoch 13, batch 1500, train_loss[loss=3.551, NarTop10Accuracy=0.6066, over 6213.00 frames. ], tot_loss[loss=3.262, NarTop10Accuracy=0.6736, over 5937.55 frames. ], batch size: 50, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (3/8) Epoch 13, batch 1600, train_loss[loss=3.076, NarTop10Accuracy=0.7038, over 7125.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6727, over 5918.73 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (3/8) Epoch 13, batch 1700, train_loss[loss=3.17, NarTop10Accuracy=0.6887, over 6198.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6735, over 5905.51 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,762 INFO [trainer.py:765] (3/8) Epoch 13, batch 1800, train_loss[loss=3.042, NarTop10Accuracy=0.7177, over 7263.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6756, over 5970.27 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (3/8) Epoch 13, batch 1900, train_loss[loss=3.46, NarTop10Accuracy=0.633, over 5916.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6762, over 5997.16 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,778 INFO [trainer.py:765] (3/8) Epoch 13, batch 2000, train_loss[loss=3.508, NarTop10Accuracy=0.63, over 5937.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6794, over 5989.38 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (3/8) Epoch 13, batch 2100, train_loss[loss=2.888, NarTop10Accuracy=0.7506, over 4878.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6805, over 5972.49 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (3/8) Epoch 13, batch 2200, train_loss[loss=3.37, NarTop10Accuracy=0.6497, over 7275.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6783, over 6006.92 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,543 INFO [trainer.py:765] (3/8) Epoch 13, batch 2300, train_loss[loss=3.6, NarTop10Accuracy=0.6022, over 5682.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6728, over 6038.96 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,940 INFO [trainer.py:765] (3/8) Epoch 13, batch 2400, train_loss[loss=3.803, NarTop10Accuracy=0.5728, over 5145.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6793, over 5773.62 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (3/8) Epoch 13, batch 2500, train_loss[loss=3.584, NarTop10Accuracy=0.6113, over 5388.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6812, over 5456.46 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,306 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (3/8) Epoch 14, batch 100, train_loss[loss=3.001, NarTop10Accuracy=0.7341, over 7185.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.6808, over 2382.05 frames. 
], batch size: 32, lr: 6.24e-03 +2024-08-06 17:22:50,378 INFO [trainer.py:765] (3/8) Epoch 14, batch 200, train_loss[loss=3.289, NarTop10Accuracy=0.6705, over 6840.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6795, over 3864.81 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,879 INFO [trainer.py:765] (3/8) Epoch 14, batch 300, train_loss[loss=3.141, NarTop10Accuracy=0.6969, over 6999.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6843, over 4673.83 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,484 INFO [trainer.py:765] (3/8) Epoch 14, batch 400, train_loss[loss=3.038, NarTop10Accuracy=0.719, over 5073.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6811, over 5132.64 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,113 INFO [trainer.py:765] (3/8) Epoch 14, batch 500, train_loss[loss=3.3, NarTop10Accuracy=0.6641, over 6042.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6785, over 5406.77 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (3/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,276 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:24:44,823 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,914 INFO [trainer.py:765] (3/8) Epoch 14, batch 600, train_loss[loss=2.893, NarTop10Accuracy=0.7513, over 5769.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6781, over 5668.24 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,548 INFO [trainer.py:765] (3/8) Epoch 14, batch 700, train_loss[loss=3.274, NarTop10Accuracy=0.6677, over 5202.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6817, over 5732.94 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,279 INFO [trainer.py:765] (3/8) Epoch 14, batch 800, train_loss[loss=3.034, NarTop10Accuracy=0.7229, over 5157.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6837, over 5784.65 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 17:26:57,659 INFO [trainer.py:765] (3/8) Epoch 14, batch 900, train_loss[loss=3.277, NarTop10Accuracy=0.6666, over 6147.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6848, over 5811.54 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,717 INFO [trainer.py:765] (3/8) Epoch 14, batch 1000, train_loss[loss=3.402, NarTop10Accuracy=0.6549, over 6348.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6811, over 5913.28 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,597 INFO [trainer.py:765] (3/8) Epoch 14, batch 1100, train_loss[loss=3.061, NarTop10Accuracy=0.7146, over 6801.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6794, over 5945.49 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,734 INFO [trainer.py:765] (3/8) Epoch 14, batch 1200, train_loss[loss=3.457, NarTop10Accuracy=0.6301, over 7212.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6807, over 5943.32 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,214 INFO [trainer.py:765] (3/8) Epoch 14, batch 1300, train_loss[loss=3.467, NarTop10Accuracy=0.6291, over 5070.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6804, over 6002.81 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,602 INFO [trainer.py:765] (3/8) Epoch 14, batch 1400, train_loss[loss=3.567, NarTop10Accuracy=0.6104, over 6117.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6785, over 6019.18 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (3/8) Epoch 14, batch 1500, train_loss[loss=3.719, NarTop10Accuracy=0.58, over 7005.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6773, over 5948.23 frames. ], batch size: 50, lr: 6.12e-03 +2024-08-06 17:30:53,044 INFO [trainer.py:765] (3/8) Epoch 14, batch 1600, train_loss[loss=2.946, NarTop10Accuracy=0.7398, over 7038.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.68, over 5929.67 frames. ], batch size: 22, lr: 6.11e-03 +2024-08-06 17:31:19,729 INFO [trainer.py:765] (3/8) Epoch 14, batch 1700, train_loss[loss=3.247, NarTop10Accuracy=0.6836, over 6303.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6845, over 5911.94 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 17:31:46,290 INFO [trainer.py:765] (3/8) Epoch 14, batch 1800, train_loss[loss=2.979, NarTop10Accuracy=0.7251, over 6957.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6885, over 5971.40 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (3/8) Epoch 14, batch 1900, train_loss[loss=3.68, NarTop10Accuracy=0.5826, over 6240.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6849, over 6015.21 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,282 INFO [trainer.py:765] (3/8) Epoch 14, batch 2000, train_loss[loss=3.308, NarTop10Accuracy=0.6772, over 5973.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6829, over 5976.40 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (3/8) Epoch 14, batch 2100, train_loss[loss=2.992, NarTop10Accuracy=0.7204, over 4794.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6802, over 5963.96 frames. ], batch size: 5, lr: 6.07e-03 +2024-08-06 17:33:28,999 INFO [trainer.py:765] (3/8) Epoch 14, batch 2200, train_loss[loss=3.219, NarTop10Accuracy=0.6862, over 7470.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6825, over 6006.62 frames. ], batch size: 32, lr: 6.06e-03 +2024-08-06 17:33:54,087 INFO [trainer.py:765] (3/8) Epoch 14, batch 2300, train_loss[loss=2.857, NarTop10Accuracy=0.7672, over 5652.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6787, over 6004.24 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (3/8) Epoch 14, batch 2400, train_loss[loss=2.794, NarTop10Accuracy=0.7644, over 5106.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6794, over 5789.51 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (3/8) Epoch 14, batch 2500, train_loss[loss=2.916, NarTop10Accuracy=0.7467, over 5076.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6858, over 5480.32 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,395 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (3/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,747 INFO [trainer.py:650] (3/8) Reaches end of dataloader. 
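A companion sketch in the same spirit (again illustrative, not part of trainer.py or optim.py): the periodic "validation:" reports and the grad-norm/clipping summaries interleaved above have their own fixed shapes, and extracting them separately makes it easy to track validation loss per epoch and the slowly rising clipping threshold. VALID_RE, GRAD_RE, and the two functions below are assumed names for illustration only.

import re

# Hypothetical helpers: collect validation reports and grad-norm summaries, e.g.
#   "Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames."
#   "Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02,
#    threshold=4.265e+02, percent-clipped=0.0"
VALID_RE = re.compile(
    r"Epoch (?P<epoch>\d+), validation: loss=(?P<loss>[\d.]+), "
    r"NarTop10Accuracy=(?P<acc>[\d.]+)"
)
GRAD_RE = re.compile(
    r"grad-norm quartiles (?P<q>(?:[\d.]+e[+-]\d+\s+){4}[\d.]+e[+-]\d+), "
    r"threshold=(?P<thr>[\d.]+e[+-]\d+), percent-clipped=(?P<pc>[\d.]+)"
)

def parse_validation(text):
    """Return (epoch, valid_loss, valid_acc) for every validation report in `text`."""
    return [(int(m.group("epoch")), float(m.group("loss")), float(m.group("acc")))
            for m in VALID_RE.finditer(text)]

def parse_grad_norm(text):
    """Return (quartiles, threshold, percent_clipped) for every grad-norm summary in `text`."""
    results = []
    for m in GRAD_RE.finditer(text):
        quartiles = [float(x) for x in m.group("q").split()]
        results.append((quartiles, float(m.group("thr")), float(m.group("pc"))))
    return results

Keeping the record types separate mirrors how the log interleaves them: per-batch lines are frequent, while the validation and grad-norm summaries appear only periodically.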
+2024-08-06 17:36:11,738 INFO [trainer.py:765] (3/8) Epoch 15, batch 100, train_loss[loss=3.068, NarTop10Accuracy=0.7138, over 7395.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6815, over 2361.87 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (3/8) Epoch 15, batch 200, train_loss[loss=3.496, NarTop10Accuracy=0.6313, over 6852.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 3864.00 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (3/8) Epoch 15, batch 300, train_loss[loss=3.277, NarTop10Accuracy=0.6728, over 7104.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6877, over 4651.36 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,903 INFO [trainer.py:765] (3/8) Epoch 15, batch 400, train_loss[loss=2.894, NarTop10Accuracy=0.7573, over 5100.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6883, over 5098.52 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,353 INFO [trainer.py:765] (3/8) Epoch 15, batch 500, train_loss[loss=2.835, NarTop10Accuracy=0.7538, over 6102.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6879, over 5376.32 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,093 INFO [trainer.py:765] (3/8) Epoch 15, batch 600, train_loss[loss=2.919, NarTop10Accuracy=0.744, over 5577.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6859, over 5654.99 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (3/8) Epoch 15, batch 700, train_loss[loss=2.933, NarTop10Accuracy=0.742, over 5109.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6844, over 5730.57 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,564 INFO [trainer.py:765] (3/8) Epoch 15, batch 800, train_loss[loss=3.263, NarTop10Accuracy=0.6725, over 5109.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6801, over 5767.87 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,790 INFO [trainer.py:765] (3/8) Epoch 15, batch 900, train_loss[loss=3.415, NarTop10Accuracy=0.6463, over 6285.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6839, over 5767.00 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,250 INFO [trainer.py:765] (3/8) Epoch 15, batch 1000, train_loss[loss=3.134, NarTop10Accuracy=0.6954, over 6237.00 frames. ], tot_loss[loss=3.199, NarTop10Accuracy=0.6863, over 5871.37 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 17:41:46,451 INFO [trainer.py:765] (3/8) Epoch 15, batch 1100, train_loss[loss=3.138, NarTop10Accuracy=0.6955, over 6861.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.686, over 5900.28 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,455 INFO [trainer.py:765] (3/8) Epoch 15, batch 1200, train_loss[loss=3.435, NarTop10Accuracy=0.6403, over 7254.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6803, over 5917.97 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,427 INFO [trainer.py:765] (3/8) Epoch 15, batch 1300, train_loss[loss=2.94, NarTop10Accuracy=0.731, over 5097.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6847, over 5989.46 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,606 INFO [trainer.py:765] (3/8) Epoch 15, batch 1400, train_loss[loss=3.354, NarTop10Accuracy=0.6515, over 6162.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6824, over 6006.26 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,557 INFO [trainer.py:765] (3/8) Epoch 15, batch 1500, train_loss[loss=3.131, NarTop10Accuracy=0.7047, over 5826.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6821, over 5939.63 frames. ], batch size: 50, lr: 5.71e-03 +2024-08-06 17:44:24,240 INFO [trainer.py:765] (3/8) Epoch 15, batch 1600, train_loss[loss=3.644, NarTop10Accuracy=0.6004, over 6930.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6856, over 5909.55 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,855 INFO [trainer.py:765] (3/8) Epoch 15, batch 1700, train_loss[loss=3.076, NarTop10Accuracy=0.7158, over 6579.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6866, over 5899.61 frames. ], batch size: 14, lr: 5.70e-03 +2024-08-06 17:45:17,293 INFO [trainer.py:765] (3/8) Epoch 15, batch 1800, train_loss[loss=3.133, NarTop10Accuracy=0.6973, over 6984.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6871, over 5966.99 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,678 INFO [trainer.py:765] (3/8) Epoch 15, batch 1900, train_loss[loss=3.113, NarTop10Accuracy=0.7069, over 6330.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6833, over 6025.36 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,539 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:46:01,743 INFO [trainer.py:811] (3/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,372 INFO [trainer.py:765] (3/8) Epoch 15, batch 2000, train_loss[loss=3.211, NarTop10Accuracy=0.6827, over 6030.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6842, over 6010.76 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (3/8) Epoch 15, batch 2100, train_loss[loss=3.114, NarTop10Accuracy=0.7087, over 5001.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6855, over 5989.88 frames. ], batch size: 5, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (3/8) Epoch 15, batch 2200, train_loss[loss=3.082, NarTop10Accuracy=0.7139, over 7338.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6832, over 6012.88 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (3/8) Epoch 15, batch 2300, train_loss[loss=3.472, NarTop10Accuracy=0.6308, over 5691.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6831, over 6021.20 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,639 INFO [trainer.py:765] (3/8) Epoch 15, batch 2400, train_loss[loss=3.352, NarTop10Accuracy=0.6523, over 5067.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6877, over 5771.95 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,161 INFO [trainer.py:765] (3/8) Epoch 15, batch 2500, train_loss[loss=2.858, NarTop10Accuracy=0.7485, over 5106.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6934, over 5476.35 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:40,518 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 17:49:41,220 INFO [trainer.py:765] (3/8) Epoch 16, batch 100, train_loss[loss=3.494, NarTop10Accuracy=0.6262, over 7200.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6943, over 2347.86 frames. 
], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,156 INFO [trainer.py:765] (3/8) Epoch 16, batch 200, train_loss[loss=2.88, NarTop10Accuracy=0.7504, over 6864.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.687, over 3841.44 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,158 INFO [trainer.py:765] (3/8) Epoch 16, batch 300, train_loss[loss=3.166, NarTop10Accuracy=0.6962, over 6942.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6879, over 4645.42 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,975 INFO [trainer.py:765] (3/8) Epoch 16, batch 400, train_loss[loss=3.535, NarTop10Accuracy=0.6069, over 5085.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6875, over 5088.55 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,322 INFO [trainer.py:765] (3/8) Epoch 16, batch 500, train_loss[loss=2.945, NarTop10Accuracy=0.7435, over 6153.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6884, over 5364.16 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,250 INFO [trainer.py:765] (3/8) Epoch 16, batch 600, train_loss[loss=3.061, NarTop10Accuracy=0.7273, over 6192.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6868, over 5621.58 frames. ], batch size: 10, lr: 5.41e-03 +2024-08-06 17:52:55,385 INFO [trainer.py:765] (3/8) Epoch 16, batch 700, train_loss[loss=3.019, NarTop10Accuracy=0.7217, over 4911.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6876, over 5690.42 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,814 INFO [trainer.py:765] (3/8) Epoch 16, batch 800, train_loss[loss=3.196, NarTop10Accuracy=0.6894, over 4914.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6899, over 5760.46 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,922 INFO [trainer.py:765] (3/8) Epoch 16, batch 900, train_loss[loss=3.468, NarTop10Accuracy=0.6265, over 6186.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6925, over 5808.52 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,606 INFO [trainer.py:765] (3/8) Epoch 16, batch 1000, train_loss[loss=2.995, NarTop10Accuracy=0.7268, over 6579.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 5917.13 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:55:17,195 INFO [trainer.py:765] (3/8) Epoch 16, batch 1100, train_loss[loss=3.238, NarTop10Accuracy=0.6813, over 6963.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6874, over 5932.87 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,208 INFO [trainer.py:765] (3/8) Epoch 16, batch 1200, train_loss[loss=3.526, NarTop10Accuracy=0.6218, over 7041.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6864, over 5914.22 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,774 INFO [trainer.py:765] (3/8) Epoch 16, batch 1300, train_loss[loss=3.328, NarTop10Accuracy=0.6628, over 5199.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6872, over 5980.74 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,647 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (3/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (3/8) Epoch 16, batch 1400, train_loss[loss=3.157, NarTop10Accuracy=0.6927, over 6000.00 frames. ], tot_loss[loss=3.183, NarTop10Accuracy=0.6892, over 6009.62 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,033 INFO [trainer.py:765] (3/8) Epoch 16, batch 1500, train_loss[loss=3.409, NarTop10Accuracy=0.6477, over 5778.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6904, over 5962.80 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 17:58:01,774 INFO [trainer.py:765] (3/8) Epoch 16, batch 1600, train_loss[loss=2.937, NarTop10Accuracy=0.7389, over 7212.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6906, over 5927.19 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (3/8) Epoch 16, batch 1700, train_loss[loss=2.915, NarTop10Accuracy=0.7477, over 6066.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6877, over 5925.09 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 17:58:54,975 INFO [trainer.py:765] (3/8) Epoch 16, batch 1800, train_loss[loss=3.071, NarTop10Accuracy=0.7099, over 7137.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5967.98 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,359 INFO [trainer.py:765] (3/8) Epoch 16, batch 1900, train_loss[loss=3.341, NarTop10Accuracy=0.649, over 6531.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6851, over 6025.62 frames. ], batch size: 51, lr: 5.33e-03 +2024-08-06 17:59:46,856 INFO [trainer.py:765] (3/8) Epoch 16, batch 2000, train_loss[loss=3.043, NarTop10Accuracy=0.7199, over 5652.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6909, over 5995.48 frames. ], batch size: 50, lr: 5.32e-03 +2024-08-06 18:00:12,116 INFO [trainer.py:765] (3/8) Epoch 16, batch 2100, train_loss[loss=3.368, NarTop10Accuracy=0.6371, over 4746.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6851, over 5985.23 frames. ], batch size: 5, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (3/8) Epoch 16, batch 2200, train_loss[loss=3.142, NarTop10Accuracy=0.6997, over 7365.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6824, over 6021.73 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (3/8) Epoch 16, batch 2300, train_loss[loss=3.138, NarTop10Accuracy=0.6978, over 5781.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6821, over 6024.07 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (3/8) Epoch 16, batch 2400, train_loss[loss=3.036, NarTop10Accuracy=0.7168, over 5166.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6861, over 5777.05 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (3/8) Epoch 16, batch 2500, train_loss[loss=3.115, NarTop10Accuracy=0.7024, over 5184.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6926, over 5491.53 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,445 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (3/8) Epoch 17, batch 100, train_loss[loss=3.179, NarTop10Accuracy=0.6945, over 7326.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6997, over 2368.25 frames. 
], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (3/8) Epoch 17, batch 200, train_loss[loss=3.37, NarTop10Accuracy=0.6444, over 6912.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6961, over 3853.73 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,590 INFO [trainer.py:765] (3/8) Epoch 17, batch 300, train_loss[loss=3.309, NarTop10Accuracy=0.6638, over 7152.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6927, over 4659.82 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,401 INFO [trainer.py:765] (3/8) Epoch 17, batch 400, train_loss[loss=3.299, NarTop10Accuracy=0.6537, over 5094.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6926, over 5102.74 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (3/8) Epoch 17, batch 500, train_loss[loss=2.878, NarTop10Accuracy=0.753, over 6177.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6958, over 5365.37 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (3/8) Epoch 17, batch 600, train_loss[loss=3.108, NarTop10Accuracy=0.7014, over 5763.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6923, over 5645.82 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (3/8) Epoch 17, batch 700, train_loss[loss=3.014, NarTop10Accuracy=0.7224, over 4905.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6936, over 5719.80 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,724 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (3/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,763 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,353 INFO [trainer.py:765] (3/8) Epoch 17, batch 800, train_loss[loss=2.934, NarTop10Accuracy=0.7291, over 4983.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6897, over 5776.88 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (3/8) Epoch 17, batch 900, train_loss[loss=3.4, NarTop10Accuracy=0.6355, over 6642.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6953, over 5801.94 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (3/8) Epoch 17, batch 1000, train_loss[loss=3.214, NarTop10Accuracy=0.6941, over 6543.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.693, over 5903.23 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (3/8) Epoch 17, batch 1100, train_loss[loss=2.938, NarTop10Accuracy=0.7429, over 6780.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6906, over 5939.96 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (3/8) Epoch 17, batch 1200, train_loss[loss=3.159, NarTop10Accuracy=0.6989, over 6963.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6916, over 5936.70 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (3/8) Epoch 17, batch 1300, train_loss[loss=3.341, NarTop10Accuracy=0.6597, over 4959.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6911, over 6006.75 frames. 
], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,026 INFO [trainer.py:765] (3/8) Epoch 17, batch 1400, train_loss[loss=3.332, NarTop10Accuracy=0.6669, over 5919.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6896, over 6007.29 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (3/8) Epoch 17, batch 1500, train_loss[loss=3.526, NarTop10Accuracy=0.6224, over 5577.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6928, over 5938.72 frames. ], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (3/8) Epoch 17, batch 1600, train_loss[loss=3.16, NarTop10Accuracy=0.6962, over 7107.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6958, over 5933.70 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,509 INFO [trainer.py:765] (3/8) Epoch 17, batch 1700, train_loss[loss=3.51, NarTop10Accuracy=0.6271, over 6282.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 5911.31 frames. ], batch size: 13, lr: 5.03e-03 +2024-08-06 18:12:40,002 INFO [trainer.py:765] (3/8) Epoch 17, batch 1800, train_loss[loss=2.848, NarTop10Accuracy=0.7576, over 7050.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6879, over 5972.83 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (3/8) Epoch 17, batch 1900, train_loss[loss=3.09, NarTop10Accuracy=0.7039, over 6243.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6864, over 6030.06 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (3/8) Epoch 17, batch 2000, train_loss[loss=3.567, NarTop10Accuracy=0.6098, over 6174.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6907, over 5999.84 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (3/8) Epoch 17, batch 2100, train_loss[loss=2.896, NarTop10Accuracy=0.7481, over 4797.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6897, over 5968.81 frames. ], batch size: 5, lr: 5.00e-03 +2024-08-06 18:14:22,434 INFO [trainer.py:765] (3/8) Epoch 17, batch 2200, train_loss[loss=3.108, NarTop10Accuracy=0.7146, over 7035.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6857, over 6022.33 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (3/8) Epoch 17, batch 2300, train_loss[loss=2.985, NarTop10Accuracy=0.73, over 5862.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.687, over 6027.85 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (3/8) Epoch 17, batch 2400, train_loss[loss=2.959, NarTop10Accuracy=0.7368, over 5082.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 5780.90 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (3/8) Epoch 17, batch 2500, train_loss[loss=2.779, NarTop10Accuracy=0.7657, over 5124.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6913, over 5475.42 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:56,000 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 18:16:49,908 INFO [trainer.py:765] (3/8) Epoch 18, batch 100, train_loss[loss=3.111, NarTop10Accuracy=0.7048, over 7329.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.688, over 2367.70 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (3/8) Epoch 18, batch 200, train_loss[loss=2.828, NarTop10Accuracy=0.7648, over 6891.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6946, over 3864.70 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,716 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 18:17:35,927 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 18:17:36,529 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (3/8) Epoch 18, batch 300, train_loss[loss=3.349, NarTop10Accuracy=0.6575, over 7206.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6942, over 4680.02 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (3/8) Epoch 18, batch 400, train_loss[loss=3.446, NarTop10Accuracy=0.6373, over 5151.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6958, over 5126.11 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (3/8) Epoch 18, batch 500, train_loss[loss=3.129, NarTop10Accuracy=0.6975, over 6036.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6952, over 5401.99 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (3/8) Epoch 18, batch 600, train_loss[loss=3.572, NarTop10Accuracy=0.6106, over 5823.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6949, over 5673.84 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,870 INFO [trainer.py:765] (3/8) Epoch 18, batch 700, train_loss[loss=3.645, NarTop10Accuracy=0.5994, over 5103.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6924, over 5749.70 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (3/8) Epoch 18, batch 800, train_loss[loss=2.932, NarTop10Accuracy=0.7486, over 4977.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5801.66 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,409 INFO [trainer.py:765] (3/8) Epoch 18, batch 900, train_loss[loss=2.999, NarTop10Accuracy=0.7255, over 6243.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6967, over 5814.56 frames. ], batch size: 13, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (3/8) Epoch 18, batch 1000, train_loss[loss=2.866, NarTop10Accuracy=0.7543, over 6657.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6931, over 5900.22 frames. ], batch size: 14, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (3/8) Epoch 18, batch 1100, train_loss[loss=3.466, NarTop10Accuracy=0.6335, over 6861.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6943, over 5943.13 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (3/8) Epoch 18, batch 1200, train_loss[loss=3.59, NarTop10Accuracy=0.6085, over 7188.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.69, over 5921.31 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,100 INFO [trainer.py:765] (3/8) Epoch 18, batch 1300, train_loss[loss=2.929, NarTop10Accuracy=0.743, over 4215.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6944, over 5967.52 frames. ], batch size: 5, lr: 4.77e-03 +2024-08-06 18:24:29,575 INFO [trainer.py:765] (3/8) Epoch 18, batch 1400, train_loss[loss=3.013, NarTop10Accuracy=0.7198, over 5970.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.694, over 5992.22 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (3/8) Epoch 18, batch 1500, train_loss[loss=3.127, NarTop10Accuracy=0.7085, over 6060.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6943, over 5924.67 frames. ], batch size: 52, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (3/8) Epoch 18, batch 1600, train_loss[loss=3.024, NarTop10Accuracy=0.7213, over 7335.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.692, over 5914.45 frames. ], batch size: 23, lr: 4.75e-03 +2024-08-06 18:25:54,688 INFO [trainer.py:765] (3/8) Epoch 18, batch 1700, train_loss[loss=3.177, NarTop10Accuracy=0.6929, over 6717.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6919, over 5904.42 frames. ], batch size: 14, lr: 4.75e-03 +2024-08-06 18:26:21,197 INFO [trainer.py:765] (3/8) Epoch 18, batch 1800, train_loss[loss=3.455, NarTop10Accuracy=0.6401, over 7086.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6933, over 5968.18 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (3/8) Epoch 18, batch 1900, train_loss[loss=3.065, NarTop10Accuracy=0.7199, over 6102.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6913, over 6017.15 frames. ], batch size: 50, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (3/8) Epoch 18, batch 2000, train_loss[loss=3.076, NarTop10Accuracy=0.7149, over 5895.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.694, over 5982.20 frames. ], batch size: 52, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (3/8) Epoch 18, batch 2100, train_loss[loss=3.337, NarTop10Accuracy=0.6571, over 4773.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6951, over 5965.50 frames. ], batch size: 5, lr: 4.73e-03 +2024-08-06 18:28:03,813 INFO [trainer.py:765] (3/8) Epoch 18, batch 2200, train_loss[loss=3.087, NarTop10Accuracy=0.7164, over 7419.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6952, over 6006.21 frames. ], batch size: 32, lr: 4.72e-03 +2024-08-06 18:28:06,572 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 18:28:14,650 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,097 INFO [trainer.py:765] (3/8) Epoch 18, batch 2300, train_loss[loss=2.882, NarTop10Accuracy=0.7441, over 5577.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6918, over 6025.36 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (3/8) Epoch 18, batch 2400, train_loss[loss=2.741, NarTop10Accuracy=0.7797, over 5202.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6965, over 5754.26 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (3/8) Epoch 18, batch 2500, train_loss[loss=3.075, NarTop10Accuracy=0.7098, over 5271.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7005, over 5467.59 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,157 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 18:30:41,232 INFO [trainer.py:765] (3/8) Epoch 19, batch 100, train_loss[loss=3.003, NarTop10Accuracy=0.723, over 7362.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6941, over 2358.44 frames. 
], batch size: 31, lr: 4.57e-03 +2024-08-06 18:31:15,603 INFO [trainer.py:765] (3/8) Epoch 19, batch 200, train_loss[loss=3.106, NarTop10Accuracy=0.7087, over 6837.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6952, over 3850.90 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (3/8) Epoch 19, batch 300, train_loss[loss=3.453, NarTop10Accuracy=0.6288, over 6978.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6986, over 4662.01 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,356 INFO [trainer.py:765] (3/8) Epoch 19, batch 400, train_loss[loss=3.202, NarTop10Accuracy=0.6821, over 5145.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6983, over 5114.69 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (3/8) Epoch 19, batch 500, train_loss[loss=2.949, NarTop10Accuracy=0.7405, over 5997.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6987, over 5384.27 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (3/8) Epoch 19, batch 600, train_loss[loss=3.194, NarTop10Accuracy=0.6847, over 5688.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6976, over 5665.53 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (3/8) Epoch 19, batch 700, train_loss[loss=2.875, NarTop10Accuracy=0.7469, over 5100.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6966, over 5726.51 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (3/8) Epoch 19, batch 800, train_loss[loss=3.044, NarTop10Accuracy=0.7243, over 4263.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6951, over 5786.05 frames. ], batch size: 5, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (3/8) Epoch 19, batch 900, train_loss[loss=2.808, NarTop10Accuracy=0.7611, over 6237.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.697, over 5791.85 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (3/8) Epoch 19, batch 1000, train_loss[loss=3.423, NarTop10Accuracy=0.6343, over 6222.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5902.56 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,939 INFO [trainer.py:765] (3/8) Epoch 19, batch 1100, train_loss[loss=2.998, NarTop10Accuracy=0.7241, over 6840.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6939, over 5922.96 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (3/8) Epoch 19, batch 1200, train_loss[loss=2.94, NarTop10Accuracy=0.7447, over 7551.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6919, over 5925.72 frames. ], batch size: 32, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (3/8) Epoch 19, batch 1300, train_loss[loss=3.009, NarTop10Accuracy=0.7266, over 4989.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6936, over 6001.20 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,680 INFO [trainer.py:765] (3/8) Epoch 19, batch 1400, train_loss[loss=2.923, NarTop10Accuracy=0.7446, over 6192.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6943, over 6026.05 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,551 INFO [trainer.py:765] (3/8) Epoch 19, batch 1500, train_loss[loss=3.458, NarTop10Accuracy=0.6319, over 6372.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6974, over 5953.83 frames. 
], batch size: 51, lr: 4.50e-03 +2024-08-06 18:39:02,312 INFO [trainer.py:765] (3/8) Epoch 19, batch 1600, train_loss[loss=3.542, NarTop10Accuracy=0.6186, over 7170.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6968, over 5928.91 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,590 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (3/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (3/8) Epoch 19, batch 1700, train_loss[loss=3.411, NarTop10Accuracy=0.6326, over 6249.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6956, over 5933.69 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (3/8) Epoch 19, batch 1800, train_loss[loss=3.517, NarTop10Accuracy=0.6163, over 6888.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6953, over 5991.15 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (3/8) Epoch 19, batch 1900, train_loss[loss=3.127, NarTop10Accuracy=0.7052, over 6135.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6968, over 6029.66 frames. ], batch size: 50, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (3/8) Epoch 19, batch 2000, train_loss[loss=3.342, NarTop10Accuracy=0.6635, over 5445.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6955, over 6010.44 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (3/8) Epoch 19, batch 2100, train_loss[loss=2.801, NarTop10Accuracy=0.7669, over 4926.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6993, over 5985.33 frames. ], batch size: 5, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (3/8) Epoch 19, batch 2200, train_loss[loss=3.076, NarTop10Accuracy=0.7065, over 7092.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6968, over 6030.98 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (3/8) Epoch 19, batch 2300, train_loss[loss=3.164, NarTop10Accuracy=0.6917, over 5745.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6926, over 6035.13 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,987 INFO [trainer.py:765] (3/8) Epoch 19, batch 2400, train_loss[loss=3.002, NarTop10Accuracy=0.7272, over 5109.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5771.22 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (3/8) Epoch 19, batch 2500, train_loss[loss=2.813, NarTop10Accuracy=0.7672, over 5088.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7002, over 5467.67 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,696 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 18:44:22,973 INFO [trainer.py:765] (3/8) Epoch 20, batch 100, train_loss[loss=3.254, NarTop10Accuracy=0.6777, over 7131.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.69, over 2366.38 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (3/8) Epoch 20, batch 200, train_loss[loss=3.53, NarTop10Accuracy=0.6176, over 6765.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6992, over 3873.91 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,278 INFO [trainer.py:765] (3/8) Epoch 20, batch 300, train_loss[loss=3.367, NarTop10Accuracy=0.6455, over 7083.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7023, over 4684.55 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (3/8) Epoch 20, batch 400, train_loss[loss=2.818, NarTop10Accuracy=0.7595, over 5184.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7018, over 5131.92 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,769 INFO [trainer.py:765] (3/8) Epoch 20, batch 500, train_loss[loss=2.788, NarTop10Accuracy=0.7673, over 6051.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6995, over 5394.39 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,254 INFO [trainer.py:765] (3/8) Epoch 20, batch 600, train_loss[loss=3.12, NarTop10Accuracy=0.6958, over 5733.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7002, over 5659.22 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (3/8) Epoch 20, batch 700, train_loss[loss=2.759, NarTop10Accuracy=0.778, over 4242.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7015, over 5712.59 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:21,015 INFO [trainer.py:765] (3/8) Epoch 20, batch 800, train_loss[loss=2.798, NarTop10Accuracy=0.7666, over 5121.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6989, over 5769.36 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:56,534 INFO [trainer.py:765] (3/8) Epoch 20, batch 900, train_loss[loss=2.951, NarTop10Accuracy=0.7414, over 6129.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7004, over 5782.37 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (3/8) Epoch 20, batch 1000, train_loss[loss=3.275, NarTop10Accuracy=0.6742, over 6597.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6961, over 5882.83 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,236 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (3/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,427 INFO [trainer.py:765] (3/8) Epoch 20, batch 1100, train_loss[loss=3.284, NarTop10Accuracy=0.6681, over 6747.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6976, over 5928.00 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (3/8) Epoch 20, batch 1200, train_loss[loss=2.933, NarTop10Accuracy=0.7388, over 7200.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6969, over 5913.91 frames. ], batch size: 32, lr: 4.29e-03 +2024-08-06 18:51:25,130 INFO [trainer.py:765] (3/8) Epoch 20, batch 1300, train_loss[loss=3.314, NarTop10Accuracy=0.6573, over 5106.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6986, over 5966.18 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,315 INFO [trainer.py:765] (3/8) Epoch 20, batch 1400, train_loss[loss=3.102, NarTop10Accuracy=0.6999, over 6117.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6994, over 5995.93 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,806 INFO [trainer.py:765] (3/8) Epoch 20, batch 1500, train_loss[loss=3.286, NarTop10Accuracy=0.6701, over 6207.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6969, over 5930.60 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (3/8) Epoch 20, batch 1600, train_loss[loss=2.984, NarTop10Accuracy=0.7328, over 7026.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6956, over 5911.21 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (3/8) Epoch 20, batch 1700, train_loss[loss=3.549, NarTop10Accuracy=0.6146, over 6303.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6966, over 5894.56 frames. ], batch size: 13, lr: 4.27e-03 +2024-08-06 18:53:53,851 INFO [trainer.py:765] (3/8) Epoch 20, batch 1800, train_loss[loss=3.059, NarTop10Accuracy=0.719, over 7062.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6978, over 5965.81 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (3/8) Epoch 20, batch 1900, train_loss[loss=3.094, NarTop10Accuracy=0.7085, over 5967.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6944, over 6002.43 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (3/8) Epoch 20, batch 2000, train_loss[loss=3.634, NarTop10Accuracy=0.597, over 5934.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6941, over 5982.52 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:55:11,183 INFO [trainer.py:765] (3/8) Epoch 20, batch 2100, train_loss[loss=3.062, NarTop10Accuracy=0.699, over 3966.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6958, over 5967.51 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (3/8) Epoch 20, batch 2200, train_loss[loss=2.928, NarTop10Accuracy=0.7398, over 7338.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.695, over 6003.09 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,636 INFO [trainer.py:765] (3/8) Epoch 20, batch 2300, train_loss[loss=3.165, NarTop10Accuracy=0.695, over 5778.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.693, over 6009.76 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,050 INFO [trainer.py:765] (3/8) Epoch 20, batch 2400, train_loss[loss=3.002, NarTop10Accuracy=0.7276, over 5103.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6947, over 5762.67 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (3/8) Epoch 20, batch 2500, train_loss[loss=2.802, NarTop10Accuracy=0.7647, over 5307.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7031, over 5466.68 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,699 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (3/8) Epoch 21, batch 100, train_loss[loss=3.331, NarTop10Accuracy=0.6594, over 7170.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7035, over 2365.79 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,417 INFO [trainer.py:765] (3/8) Epoch 21, batch 200, train_loss[loss=2.869, NarTop10Accuracy=0.7526, over 6696.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7017, over 3865.50 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (3/8) Epoch 21, batch 300, train_loss[loss=2.901, NarTop10Accuracy=0.7411, over 7032.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6998, over 4672.59 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,150 INFO [trainer.py:765] (3/8) Epoch 21, batch 400, train_loss[loss=2.874, NarTop10Accuracy=0.7578, over 5043.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7032, over 5116.84 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,839 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (3/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,891 INFO [trainer.py:765] (3/8) Epoch 21, batch 500, train_loss[loss=2.741, NarTop10Accuracy=0.7795, over 6057.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7022, over 5375.13 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,329 INFO [trainer.py:765] (3/8) Epoch 21, batch 600, train_loss[loss=3.283, NarTop10Accuracy=0.6601, over 6192.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7055, over 5630.47 frames. ], batch size: 10, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (3/8) Epoch 21, batch 700, train_loss[loss=2.876, NarTop10Accuracy=0.7556, over 5184.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7017, over 5698.72 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (3/8) Epoch 21, batch 800, train_loss[loss=3.163, NarTop10Accuracy=0.703, over 4956.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6995, over 5780.96 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (3/8) Epoch 21, batch 900, train_loss[loss=2.887, NarTop10Accuracy=0.7457, over 6654.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6992, over 5804.05 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (3/8) Epoch 21, batch 1000, train_loss[loss=2.833, NarTop10Accuracy=0.7589, over 6216.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6989, over 5903.68 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:04:07,207 INFO [trainer.py:765] (3/8) Epoch 21, batch 1100, train_loss[loss=3.486, NarTop10Accuracy=0.6312, over 6588.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6951, over 5925.46 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,463 INFO [trainer.py:765] (3/8) Epoch 21, batch 1200, train_loss[loss=3.28, NarTop10Accuracy=0.6679, over 7365.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6996, over 5930.70 frames. ], batch size: 32, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (3/8) Epoch 21, batch 1300, train_loss[loss=2.86, NarTop10Accuracy=0.7518, over 4362.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7036, over 5995.69 frames. ], batch size: 5, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (3/8) Epoch 21, batch 1400, train_loss[loss=3.478, NarTop10Accuracy=0.6203, over 6033.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.703, over 6014.45 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,600 INFO [trainer.py:765] (3/8) Epoch 21, batch 1500, train_loss[loss=3.293, NarTop10Accuracy=0.6645, over 6168.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7007, over 5946.14 frames. 
], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,462 INFO [trainer.py:765] (3/8) Epoch 21, batch 1600, train_loss[loss=2.8, NarTop10Accuracy=0.7706, over 7029.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6999, over 5925.26 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,212 INFO [trainer.py:765] (3/8) Epoch 21, batch 1700, train_loss[loss=3.169, NarTop10Accuracy=0.6954, over 6615.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.699, over 5933.65 frames. ], batch size: 14, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (3/8) Epoch 21, batch 1800, train_loss[loss=3.031, NarTop10Accuracy=0.723, over 6816.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6995, over 5980.32 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (3/8) Epoch 21, batch 1900, train_loss[loss=3.638, NarTop10Accuracy=0.5947, over 6075.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6974, over 6019.32 frames. ], batch size: 53, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (3/8) Epoch 21, batch 2000, train_loss[loss=3.47, NarTop10Accuracy=0.6309, over 6591.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6979, over 6000.97 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (3/8) Epoch 21, batch 2100, train_loss[loss=2.903, NarTop10Accuracy=0.7399, over 3945.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5996.07 frames. ], batch size: 4, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (3/8) Epoch 21, batch 2200, train_loss[loss=2.918, NarTop10Accuracy=0.7435, over 7437.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6955, over 6030.04 frames. ], batch size: 32, lr: 4.04e-03 +2024-08-06 19:09:53,223 INFO [trainer.py:765] (3/8) Epoch 21, batch 2300, train_loss[loss=3.157, NarTop10Accuracy=0.7057, over 5742.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6928, over 6029.69 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,597 INFO [trainer.py:765] (3/8) Epoch 21, batch 2400, train_loss[loss=3.345, NarTop10Accuracy=0.6408, over 5022.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6963, over 5764.97 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,230 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (3/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,275 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (3/8) Epoch 21, batch 2500, train_loss[loss=3.375, NarTop10Accuracy=0.649, over 5022.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7042, over 5474.58 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:09,185 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 19:12:09,054 INFO [trainer.py:765] (3/8) Epoch 22, batch 100, train_loss[loss=2.838, NarTop10Accuracy=0.7642, over 7071.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7074, over 2360.08 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (3/8) Epoch 22, batch 200, train_loss[loss=3.196, NarTop10Accuracy=0.6922, over 6717.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7064, over 3851.68 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (3/8) Epoch 22, batch 300, train_loss[loss=2.913, NarTop10Accuracy=0.7563, over 7014.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7066, over 4663.73 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (3/8) Epoch 22, batch 400, train_loss[loss=2.818, NarTop10Accuracy=0.7628, over 5061.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7082, over 5106.34 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (3/8) Epoch 22, batch 500, train_loss[loss=3.128, NarTop10Accuracy=0.7051, over 6039.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7074, over 5384.79 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (3/8) Epoch 22, batch 600, train_loss[loss=2.994, NarTop10Accuracy=0.7368, over 5799.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7018, over 5646.22 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (3/8) Epoch 22, batch 700, train_loss[loss=3.426, NarTop10Accuracy=0.6347, over 5010.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7016, over 5719.62 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:10,665 INFO [trainer.py:765] (3/8) Epoch 22, batch 800, train_loss[loss=2.888, NarTop10Accuracy=0.7428, over 4326.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7027, over 5773.07 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (3/8) Epoch 22, batch 900, train_loss[loss=3.011, NarTop10Accuracy=0.725, over 6207.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7025, over 5797.93 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (3/8) Epoch 22, batch 1000, train_loss[loss=3.076, NarTop10Accuracy=0.7009, over 6285.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7039, over 5899.26 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (3/8) Epoch 22, batch 1100, train_loss[loss=3.031, NarTop10Accuracy=0.7214, over 6798.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7022, over 5936.15 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,927 INFO [trainer.py:765] (3/8) Epoch 22, batch 1200, train_loss[loss=2.906, NarTop10Accuracy=0.7501, over 7353.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7057, over 5944.96 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (3/8) Epoch 22, batch 1300, train_loss[loss=3.003, NarTop10Accuracy=0.7231, over 5073.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7068, over 6009.90 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (3/8) Epoch 22, batch 1400, train_loss[loss=2.885, NarTop10Accuracy=0.7537, over 6231.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 6017.84 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (3/8) Epoch 22, batch 1500, train_loss[loss=3.462, NarTop10Accuracy=0.6328, over 5904.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 5940.48 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,646 INFO [trainer.py:765] (3/8) Epoch 22, batch 1600, train_loss[loss=3.054, NarTop10Accuracy=0.7173, over 7077.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7001, over 5909.04 frames. 
], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (3/8) Epoch 22, batch 1700, train_loss[loss=3.2, NarTop10Accuracy=0.679, over 6534.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.6999, over 5912.10 frames. ], batch size: 14, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (3/8) Epoch 22, batch 1800, train_loss[loss=2.937, NarTop10Accuracy=0.7438, over 7149.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7012, over 5993.60 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (3/8) Epoch 22, batch 1900, train_loss[loss=3.1, NarTop10Accuracy=0.714, over 6096.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6972, over 6037.53 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,109 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (3/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,818 INFO [trainer.py:765] (3/8) Epoch 22, batch 2000, train_loss[loss=3.524, NarTop10Accuracy=0.6201, over 6570.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7009, over 6018.31 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (3/8) Epoch 22, batch 2100, train_loss[loss=3.121, NarTop10Accuracy=0.7042, over 4749.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7029, over 5998.21 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 19:23:15,229 INFO [trainer.py:765] (3/8) Epoch 22, batch 2200, train_loss[loss=2.984, NarTop10Accuracy=0.7309, over 7212.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 6038.52 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,314 INFO [trainer.py:765] (3/8) Epoch 22, batch 2300, train_loss[loss=3.086, NarTop10Accuracy=0.7081, over 6207.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6992, over 6044.30 frames. ], batch size: 10, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (3/8) Epoch 22, batch 2400, train_loss[loss=3.192, NarTop10Accuracy=0.6818, over 5772.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7012, over 5794.45 frames. ], batch size: 8, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (3/8) Epoch 22, batch 2500, train_loss[loss=3.199, NarTop10Accuracy=0.6757, over 5238.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7041, over 5488.57 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,439 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (3/8) Epoch 23, batch 100, train_loss[loss=2.989, NarTop10Accuracy=0.7254, over 7125.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7014, over 2355.94 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (3/8) Epoch 23, batch 200, train_loss[loss=3.444, NarTop10Accuracy=0.6411, over 6750.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7008, over 3848.34 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (3/8) Epoch 23, batch 300, train_loss[loss=2.98, NarTop10Accuracy=0.7286, over 7059.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7052, over 4640.14 frames. 
], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,541 INFO [trainer.py:765] (3/8) Epoch 23, batch 400, train_loss[loss=3.356, NarTop10Accuracy=0.6481, over 5109.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7039, over 5100.64 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,713 INFO [trainer.py:765] (3/8) Epoch 23, batch 500, train_loss[loss=3.429, NarTop10Accuracy=0.6293, over 6003.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7024, over 5370.55 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (3/8) Epoch 23, batch 600, train_loss[loss=3.351, NarTop10Accuracy=0.6568, over 5667.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 5639.80 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (3/8) Epoch 23, batch 700, train_loss[loss=3.173, NarTop10Accuracy=0.6819, over 5226.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7063, over 5704.91 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (3/8) Epoch 23, batch 800, train_loss[loss=2.859, NarTop10Accuracy=0.7549, over 4356.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.7043, over 5772.33 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (3/8) Epoch 23, batch 900, train_loss[loss=3.221, NarTop10Accuracy=0.6821, over 6465.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7055, over 5793.73 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (3/8) Epoch 23, batch 1000, train_loss[loss=2.951, NarTop10Accuracy=0.7337, over 6255.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7075, over 5916.19 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,521 INFO [trainer.py:765] (3/8) Epoch 23, batch 1100, train_loss[loss=3.043, NarTop10Accuracy=0.7202, over 6900.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7068, over 5938.15 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (3/8) Epoch 23, batch 1200, train_loss[loss=3.071, NarTop10Accuracy=0.7192, over 7227.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7038, over 5933.80 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (3/8) Epoch 23, batch 1300, train_loss[loss=3.077, NarTop10Accuracy=0.7085, over 5031.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.703, over 5982.56 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (3/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:33:05,263 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,407 INFO [trainer.py:765] (3/8) Epoch 23, batch 1400, train_loss[loss=2.77, NarTop10Accuracy=0.7739, over 6126.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.703, over 6005.93 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,216 INFO [trainer.py:765] (3/8) Epoch 23, batch 1500, train_loss[loss=3.296, NarTop10Accuracy=0.6705, over 5616.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.705, over 5954.33 frames. 
], batch size: 50, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (3/8) Epoch 23, batch 1600, train_loss[loss=2.902, NarTop10Accuracy=0.7511, over 7209.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7041, over 5939.75 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (3/8) Epoch 23, batch 1700, train_loss[loss=3.31, NarTop10Accuracy=0.6642, over 6231.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7009, over 5912.73 frames. ], batch size: 13, lr: 3.71e-03 +2024-08-06 19:35:19,262 INFO [trainer.py:765] (3/8) Epoch 23, batch 1800, train_loss[loss=2.95, NarTop10Accuracy=0.7429, over 7044.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7027, over 5975.65 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,597 INFO [trainer.py:765] (3/8) Epoch 23, batch 1900, train_loss[loss=3.43, NarTop10Accuracy=0.6371, over 6192.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.6999, over 6019.30 frames. ], batch size: 51, lr: 3.70e-03 +2024-08-06 19:36:11,171 INFO [trainer.py:765] (3/8) Epoch 23, batch 2000, train_loss[loss=3.643, NarTop10Accuracy=0.5959, over 5631.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7023, over 5983.98 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,518 INFO [trainer.py:765] (3/8) Epoch 23, batch 2100, train_loss[loss=3.394, NarTop10Accuracy=0.6479, over 3936.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7032, over 5979.84 frames. ], batch size: 4, lr: 3.69e-03 +2024-08-06 19:37:01,909 INFO [trainer.py:765] (3/8) Epoch 23, batch 2200, train_loss[loss=3.163, NarTop10Accuracy=0.6989, over 7005.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7004, over 6019.64 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,062 INFO [trainer.py:765] (3/8) Epoch 23, batch 2300, train_loss[loss=2.963, NarTop10Accuracy=0.7385, over 5592.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7026, over 6026.63 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,424 INFO [trainer.py:765] (3/8) Epoch 23, batch 2400, train_loss[loss=3.134, NarTop10Accuracy=0.6976, over 5226.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7029, over 5775.06 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,053 INFO [trainer.py:765] (3/8) Epoch 23, batch 2500, train_loss[loss=3.258, NarTop10Accuracy=0.675, over 5178.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7075, over 5468.04 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,038 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 19:39:37,631 INFO [trainer.py:765] (3/8) Epoch 24, batch 100, train_loss[loss=3.485, NarTop10Accuracy=0.6236, over 7413.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7024, over 2369.84 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,189 INFO [trainer.py:765] (3/8) Epoch 24, batch 200, train_loss[loss=2.85, NarTop10Accuracy=0.7483, over 6987.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 3860.05 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,555 INFO [trainer.py:765] (3/8) Epoch 24, batch 300, train_loss[loss=2.903, NarTop10Accuracy=0.7449, over 7251.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7078, over 4670.58 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,233 INFO [trainer.py:765] (3/8) Epoch 24, batch 400, train_loss[loss=3.08, NarTop10Accuracy=0.7146, over 5079.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7075, over 5113.99 frames. 
], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (3/8) Epoch 24, batch 500, train_loss[loss=2.809, NarTop10Accuracy=0.7615, over 6174.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7092, over 5388.22 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,451 INFO [trainer.py:765] (3/8) Epoch 24, batch 600, train_loss[loss=2.887, NarTop10Accuracy=0.7576, over 5670.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7087, over 5651.30 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (3/8) Epoch 24, batch 700, train_loss[loss=3.006, NarTop10Accuracy=0.7225, over 4989.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7074, over 5721.04 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,380 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (3/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:43:28,563 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,815 INFO [trainer.py:765] (3/8) Epoch 24, batch 800, train_loss[loss=2.883, NarTop10Accuracy=0.7453, over 5076.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 5771.88 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,410 INFO [trainer.py:765] (3/8) Epoch 24, batch 900, train_loss[loss=2.839, NarTop10Accuracy=0.7658, over 6267.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7074, over 5813.09 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,490 INFO [trainer.py:765] (3/8) Epoch 24, batch 1000, train_loss[loss=3.278, NarTop10Accuracy=0.6669, over 6282.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7048, over 5913.18 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:45:27,108 INFO [trainer.py:765] (3/8) Epoch 24, batch 1100, train_loss[loss=3.406, NarTop10Accuracy=0.6422, over 6879.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7029, over 5933.70 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,438 INFO [trainer.py:765] (3/8) Epoch 24, batch 1200, train_loss[loss=3.09, NarTop10Accuracy=0.711, over 7494.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.7044, over 5934.83 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,295 INFO [trainer.py:765] (3/8) Epoch 24, batch 1300, train_loss[loss=3.268, NarTop10Accuracy=0.6597, over 5076.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7064, over 5998.41 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,860 INFO [trainer.py:765] (3/8) Epoch 24, batch 1400, train_loss[loss=3.388, NarTop10Accuracy=0.6478, over 6204.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7033, over 6017.82 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,957 INFO [trainer.py:765] (3/8) Epoch 24, batch 1500, train_loss[loss=3.439, NarTop10Accuracy=0.6346, over 6222.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7007, over 5973.91 frames. ], batch size: 51, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (3/8) Epoch 24, batch 1600, train_loss[loss=3.366, NarTop10Accuracy=0.6523, over 7167.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7001, over 5943.88 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,267 INFO [trainer.py:765] (3/8) Epoch 24, batch 1700, train_loss[loss=2.851, NarTop10Accuracy=0.7582, over 6543.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7001, over 5920.33 frames. ], batch size: 14, lr: 3.55e-03 +2024-08-06 19:49:01,639 INFO [trainer.py:765] (3/8) Epoch 24, batch 1800, train_loss[loss=2.839, NarTop10Accuracy=0.764, over 7221.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6991, over 5984.30 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,042 INFO [trainer.py:765] (3/8) Epoch 24, batch 1900, train_loss[loss=3.497, NarTop10Accuracy=0.6252, over 6261.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6978, over 6018.33 frames. ], batch size: 53, lr: 3.55e-03 +2024-08-06 19:49:53,534 INFO [trainer.py:765] (3/8) Epoch 24, batch 2000, train_loss[loss=3.553, NarTop10Accuracy=0.6149, over 6516.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5991.32 frames. ], batch size: 52, lr: 3.54e-03 +2024-08-06 19:50:18,820 INFO [trainer.py:765] (3/8) Epoch 24, batch 2100, train_loss[loss=2.815, NarTop10Accuracy=0.7705, over 4950.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5971.84 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (3/8) Epoch 24, batch 2200, train_loss[loss=3.49, NarTop10Accuracy=0.6277, over 7353.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.703, over 5999.53 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,025 INFO [trainer.py:765] (3/8) Epoch 24, batch 2300, train_loss[loss=2.843, NarTop10Accuracy=0.7626, over 5745.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7037, over 6015.71 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,349 INFO [trainer.py:765] (3/8) Epoch 24, batch 2400, train_loss[loss=2.973, NarTop10Accuracy=0.7168, over 5286.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7049, over 5782.64 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (3/8) Epoch 24, batch 2500, train_loss[loss=2.99, NarTop10Accuracy=0.7333, over 5709.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7093, over 5493.99 frames. ], batch size: 8, lr: 3.53e-03 +2024-08-06 19:52:16,695 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 19:53:22,199 INFO [trainer.py:765] (3/8) Epoch 25, batch 100, train_loss[loss=3.444, NarTop10Accuracy=0.6337, over 7488.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7084, over 2362.86 frames. ], batch size: 32, lr: 3.45e-03 +2024-08-06 19:53:47,262 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (3/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,176 INFO [trainer.py:765] (3/8) Epoch 25, batch 200, train_loss[loss=2.908, NarTop10Accuracy=0.7587, over 6735.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.709, over 3850.93 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,646 INFO [trainer.py:765] (3/8) Epoch 25, batch 300, train_loss[loss=3.307, NarTop10Accuracy=0.6681, over 6927.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.711, over 4660.67 frames. 
], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (3/8) Epoch 25, batch 400, train_loss[loss=2.988, NarTop10Accuracy=0.726, over 5031.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7088, over 5125.77 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,737 INFO [trainer.py:765] (3/8) Epoch 25, batch 500, train_loss[loss=2.8, NarTop10Accuracy=0.7687, over 6021.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.711, over 5407.78 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,814 INFO [trainer.py:765] (3/8) Epoch 25, batch 600, train_loss[loss=2.84, NarTop10Accuracy=0.7617, over 5784.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7101, over 5659.35 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (3/8) Epoch 25, batch 700, train_loss[loss=2.894, NarTop10Accuracy=0.7474, over 5130.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7104, over 5712.39 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,135 INFO [trainer.py:765] (3/8) Epoch 25, batch 800, train_loss[loss=3.006, NarTop10Accuracy=0.7246, over 4908.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7088, over 5767.69 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,678 INFO [trainer.py:765] (3/8) Epoch 25, batch 900, train_loss[loss=3.181, NarTop10Accuracy=0.6916, over 6297.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7097, over 5790.31 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:58:37,638 INFO [trainer.py:765] (3/8) Epoch 25, batch 1000, train_loss[loss=2.803, NarTop10Accuracy=0.7655, over 6684.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7069, over 5904.55 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:59:14,854 INFO [trainer.py:765] (3/8) Epoch 25, batch 1100, train_loss[loss=3.347, NarTop10Accuracy=0.6596, over 6771.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7065, over 5942.58 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,039 INFO [trainer.py:765] (3/8) Epoch 25, batch 1200, train_loss[loss=3.278, NarTop10Accuracy=0.6671, over 7392.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7056, over 5929.57 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (3/8) Epoch 25, batch 1300, train_loss[loss=2.879, NarTop10Accuracy=0.7579, over 5109.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7075, over 5985.72 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,015 INFO [trainer.py:765] (3/8) Epoch 25, batch 1400, train_loss[loss=3.013, NarTop10Accuracy=0.7294, over 6045.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7099, over 5994.13 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (3/8) Epoch 25, batch 1500, train_loss[loss=3.268, NarTop10Accuracy=0.6786, over 5928.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7091, over 5940.65 frames. ], batch size: 51, lr: 3.41e-03 +2024-08-06 20:02:00,624 INFO [trainer.py:765] (3/8) Epoch 25, batch 1600, train_loss[loss=2.951, NarTop10Accuracy=0.7402, over 7284.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.71, over 5936.06 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,359 INFO [trainer.py:765] (3/8) Epoch 25, batch 1700, train_loss[loss=2.924, NarTop10Accuracy=0.7479, over 6180.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7106, over 5919.67 frames. 
], batch size: 13, lr: 3.41e-03 +2024-08-06 20:02:53,853 INFO [trainer.py:765] (3/8) Epoch 25, batch 1800, train_loss[loss=3.3, NarTop10Accuracy=0.6572, over 7092.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 5986.38 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,341 INFO [trainer.py:765] (3/8) Epoch 25, batch 1900, train_loss[loss=3.15, NarTop10Accuracy=0.6937, over 6213.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7044, over 6035.41 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,933 INFO [trainer.py:765] (3/8) Epoch 25, batch 2000, train_loss[loss=3.555, NarTop10Accuracy=0.6173, over 5937.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7027, over 6003.03 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:04:11,245 INFO [trainer.py:765] (3/8) Epoch 25, batch 2100, train_loss[loss=2.872, NarTop10Accuracy=0.7477, over 4011.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7043, over 5987.06 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,409 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:04:39,344 INFO [trainer.py:811] (3/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,513 INFO [trainer.py:765] (3/8) Epoch 25, batch 2200, train_loss[loss=3.313, NarTop10Accuracy=0.6623, over 7128.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7028, over 6024.53 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (3/8) Epoch 25, batch 2300, train_loss[loss=3.078, NarTop10Accuracy=0.7125, over 5583.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7025, over 6034.37 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (3/8) Epoch 25, batch 2400, train_loss[loss=2.81, NarTop10Accuracy=0.7701, over 5055.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7051, over 5764.50 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,846 INFO [trainer.py:765] (3/8) Epoch 25, batch 2500, train_loss[loss=2.819, NarTop10Accuracy=0.7648, over 5037.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7106, over 5477.67 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:17,585 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 20:07:19,305 INFO [trainer.py:765] (3/8) Epoch 26, batch 100, train_loss[loss=3.09, NarTop10Accuracy=0.711, over 7104.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7065, over 2361.05 frames. ], batch size: 31, lr: 3.32e-03 +2024-08-06 20:07:52,382 INFO [trainer.py:765] (3/8) Epoch 26, batch 200, train_loss[loss=2.909, NarTop10Accuracy=0.7489, over 6807.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7058, over 3849.13 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,734 INFO [trainer.py:765] (3/8) Epoch 26, batch 300, train_loss[loss=2.975, NarTop10Accuracy=0.729, over 6882.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.707, over 4657.32 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,185 INFO [trainer.py:765] (3/8) Epoch 26, batch 400, train_loss[loss=2.979, NarTop10Accuracy=0.7288, over 5607.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7073, over 5128.73 frames. 
], batch size: 8, lr: 3.31e-03 +2024-08-06 20:09:33,148 INFO [trainer.py:765] (3/8) Epoch 26, batch 500, train_loss[loss=2.88, NarTop10Accuracy=0.7535, over 6075.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.706, over 5413.74 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,890 INFO [trainer.py:765] (3/8) Epoch 26, batch 600, train_loss[loss=2.842, NarTop10Accuracy=0.7612, over 5643.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7116, over 5669.91 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,873 INFO [trainer.py:765] (3/8) Epoch 26, batch 700, train_loss[loss=3.394, NarTop10Accuracy=0.643, over 5022.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7075, over 5731.18 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:19,061 INFO [trainer.py:765] (3/8) Epoch 26, batch 800, train_loss[loss=3.06, NarTop10Accuracy=0.7179, over 4350.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5790.35 frames. ], batch size: 5, lr: 3.30e-03 +2024-08-06 20:11:49,316 INFO [trainer.py:765] (3/8) Epoch 26, batch 900, train_loss[loss=2.795, NarTop10Accuracy=0.7689, over 6675.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7087, over 5795.58 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:12:25,973 INFO [trainer.py:765] (3/8) Epoch 26, batch 1000, train_loss[loss=2.845, NarTop10Accuracy=0.758, over 6684.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.707, over 5882.85 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:13:06,377 INFO [trainer.py:765] (3/8) Epoch 26, batch 1100, train_loss[loss=3.303, NarTop10Accuracy=0.657, over 6870.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7043, over 5915.46 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,536 INFO [trainer.py:765] (3/8) Epoch 26, batch 1200, train_loss[loss=3.454, NarTop10Accuracy=0.6319, over 7389.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7069, over 5918.29 frames. ], batch size: 32, lr: 3.29e-03 +2024-08-06 20:14:13,696 INFO [trainer.py:765] (3/8) Epoch 26, batch 1300, train_loss[loss=2.807, NarTop10Accuracy=0.7707, over 4230.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7075, over 5976.44 frames. ], batch size: 5, lr: 3.28e-03 +2024-08-06 20:14:50,539 INFO [trainer.py:765] (3/8) Epoch 26, batch 1400, train_loss[loss=2.875, NarTop10Accuracy=0.7523, over 6174.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7068, over 5993.23 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,155 INFO [trainer.py:765] (3/8) Epoch 26, batch 1500, train_loss[loss=3.112, NarTop10Accuracy=0.7076, over 6630.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7069, over 5933.78 frames. ], batch size: 50, lr: 3.28e-03 +2024-08-06 20:15:48,980 INFO [trainer.py:765] (3/8) Epoch 26, batch 1600, train_loss[loss=3.081, NarTop10Accuracy=0.7068, over 7056.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.709, over 5923.36 frames. ], batch size: 23, lr: 3.28e-03 +2024-08-06 20:15:50,003 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (3/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,239 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,951 INFO [trainer.py:765] (3/8) Epoch 26, batch 1700, train_loss[loss=3.141, NarTop10Accuracy=0.6973, over 6117.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7121, over 5929.17 frames. ], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,426 INFO [trainer.py:765] (3/8) Epoch 26, batch 1800, train_loss[loss=2.887, NarTop10Accuracy=0.7556, over 7161.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5966.11 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,839 INFO [trainer.py:765] (3/8) Epoch 26, batch 1900, train_loss[loss=2.982, NarTop10Accuracy=0.7379, over 6441.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 6009.18 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,379 INFO [trainer.py:765] (3/8) Epoch 26, batch 2000, train_loss[loss=3.598, NarTop10Accuracy=0.5979, over 6126.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.709, over 5979.54 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,562 INFO [trainer.py:765] (3/8) Epoch 26, batch 2100, train_loss[loss=3.032, NarTop10Accuracy=0.7097, over 4917.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.707, over 5972.84 frames. ], batch size: 5, lr: 3.27e-03 +2024-08-06 20:18:32,776 INFO [trainer.py:765] (3/8) Epoch 26, batch 2200, train_loss[loss=2.858, NarTop10Accuracy=0.7577, over 7236.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7075, over 6013.49 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,897 INFO [trainer.py:765] (3/8) Epoch 26, batch 2300, train_loss[loss=3.207, NarTop10Accuracy=0.6822, over 5640.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7054, over 6035.17 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,205 INFO [trainer.py:765] (3/8) Epoch 26, batch 2400, train_loss[loss=2.788, NarTop10Accuracy=0.7739, over 5052.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7098, over 5782.27 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,651 INFO [trainer.py:765] (3/8) Epoch 26, batch 2500, train_loss[loss=2.746, NarTop10Accuracy=0.771, over 5178.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5468.01 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:06,170 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 20:21:04,874 INFO [trainer.py:765] (3/8) Epoch 27, batch 100, train_loss[loss=3.318, NarTop10Accuracy=0.6616, over 7407.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 2369.56 frames. ], batch size: 32, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (3/8) Epoch 27, batch 200, train_loss[loss=2.771, NarTop10Accuracy=0.7748, over 6786.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7093, over 3833.75 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,050 INFO [trainer.py:765] (3/8) Epoch 27, batch 300, train_loss[loss=2.831, NarTop10Accuracy=0.7609, over 7023.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7081, over 4627.06 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (3/8) Epoch 27, batch 400, train_loss[loss=2.933, NarTop10Accuracy=0.7326, over 5214.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7117, over 5078.45 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (3/8) Epoch 27, batch 500, train_loss[loss=2.813, NarTop10Accuracy=0.7654, over 6135.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5350.59 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (3/8) Epoch 27, batch 600, train_loss[loss=3.29, NarTop10Accuracy=0.6657, over 5676.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7144, over 5625.25 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,976 INFO [trainer.py:765] (3/8) Epoch 27, batch 700, train_loss[loss=2.661, NarTop10Accuracy=0.7856, over 4329.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.715, over 5704.31 frames. ], batch size: 5, lr: 3.18e-03 +2024-08-06 20:25:03,408 INFO [trainer.py:765] (3/8) Epoch 27, batch 800, train_loss[loss=3.222, NarTop10Accuracy=0.6801, over 5085.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7116, over 5780.98 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (3/8) Epoch 27, batch 900, train_loss[loss=3.329, NarTop10Accuracy=0.6624, over 6510.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7106, over 5794.73 frames. ], batch size: 14, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (3/8) Epoch 27, batch 1000, train_loss[loss=2.757, NarTop10Accuracy=0.783, over 6138.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7108, over 5895.55 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,315 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (3/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,346 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (3/8) Epoch 27, batch 1100, train_loss[loss=3.068, NarTop10Accuracy=0.7093, over 6699.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.71, over 5919.27 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,544 INFO [trainer.py:765] (3/8) Epoch 27, batch 1200, train_loss[loss=2.99, NarTop10Accuracy=0.7313, over 7188.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7105, over 5922.16 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (3/8) Epoch 27, batch 1300, train_loss[loss=2.969, NarTop10Accuracy=0.7277, over 5166.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.712, over 5993.11 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (3/8) Epoch 27, batch 1400, train_loss[loss=3.557, NarTop10Accuracy=0.6177, over 5985.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 6011.22 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (3/8) Epoch 27, batch 1500, train_loss[loss=3.008, NarTop10Accuracy=0.726, over 6456.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7104, over 5958.44 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (3/8) Epoch 27, batch 1600, train_loss[loss=2.877, NarTop10Accuracy=0.7582, over 7092.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.709, over 5931.74 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (3/8) Epoch 27, batch 1700, train_loss[loss=3.058, NarTop10Accuracy=0.7006, over 6774.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7095, over 5923.60 frames. ], batch size: 14, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (3/8) Epoch 27, batch 1800, train_loss[loss=3.481, NarTop10Accuracy=0.6287, over 7089.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7086, over 5982.92 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (3/8) Epoch 27, batch 1900, train_loss[loss=3.142, NarTop10Accuracy=0.6911, over 6414.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7087, over 6013.23 frames. ], batch size: 51, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (3/8) Epoch 27, batch 2000, train_loss[loss=3.061, NarTop10Accuracy=0.7176, over 5754.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 6004.62 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,659 INFO [trainer.py:765] (3/8) Epoch 27, batch 2100, train_loss[loss=2.724, NarTop10Accuracy=0.7822, over 4830.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7097, over 5971.33 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (3/8) Epoch 27, batch 2200, train_loss[loss=3.499, NarTop10Accuracy=0.6271, over 7215.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.709, over 6013.39 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 20:32:32,941 INFO [trainer.py:765] (3/8) Epoch 27, batch 2300, train_loss[loss=2.86, NarTop10Accuracy=0.7492, over 5658.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 6014.83 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (3/8) Epoch 27, batch 2400, train_loss[loss=2.767, NarTop10Accuracy=0.783, over 5151.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7076, over 5781.36 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (3/8) Epoch 27, batch 2500, train_loss[loss=3.491, NarTop10Accuracy=0.6209, over 5286.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5485.24 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,704 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 20:34:35,828 INFO [trainer.py:765] (3/8) Epoch 28, batch 100, train_loss[loss=2.903, NarTop10Accuracy=0.7488, over 7158.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7107, over 2364.74 frames. ], batch size: 32, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (3/8) Epoch 28, batch 200, train_loss[loss=2.826, NarTop10Accuracy=0.7545, over 6918.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7112, over 3864.80 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,422 INFO [trainer.py:765] (3/8) Epoch 28, batch 300, train_loss[loss=3.082, NarTop10Accuracy=0.7067, over 6966.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7109, over 4662.82 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,864 INFO [trainer.py:765] (3/8) Epoch 28, batch 400, train_loss[loss=3.31, NarTop10Accuracy=0.669, over 5040.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 5099.96 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,406 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (3/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,663 INFO [trainer.py:765] (3/8) Epoch 28, batch 500, train_loss[loss=3.204, NarTop10Accuracy=0.6858, over 6138.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5377.03 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,462 INFO [trainer.py:765] (3/8) Epoch 28, batch 600, train_loss[loss=3.057, NarTop10Accuracy=0.7114, over 5784.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7092, over 5645.78 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,891 INFO [trainer.py:765] (3/8) Epoch 28, batch 700, train_loss[loss=3.124, NarTop10Accuracy=0.7081, over 5043.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7079, over 5707.51 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,488 INFO [trainer.py:765] (3/8) Epoch 28, batch 800, train_loss[loss=2.811, NarTop10Accuracy=0.765, over 4992.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7147, over 5767.84 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,506 INFO [trainer.py:765] (3/8) Epoch 28, batch 900, train_loss[loss=3.336, NarTop10Accuracy=0.6574, over 6714.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7141, over 5791.61 frames. ], batch size: 14, lr: 3.06e-03 +2024-08-06 20:39:53,239 INFO [trainer.py:765] (3/8) Epoch 28, batch 1000, train_loss[loss=3.232, NarTop10Accuracy=0.6759, over 6585.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7129, over 5909.31 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 20:40:25,867 INFO [trainer.py:765] (3/8) Epoch 28, batch 1100, train_loss[loss=2.844, NarTop10Accuracy=0.76, over 6741.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 5949.42 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,418 INFO [trainer.py:765] (3/8) Epoch 28, batch 1200, train_loss[loss=3.361, NarTop10Accuracy=0.6568, over 7101.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 5947.15 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,680 INFO [trainer.py:765] (3/8) Epoch 28, batch 1300, train_loss[loss=3.322, NarTop10Accuracy=0.66, over 5085.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7096, over 6003.18 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 20:42:13,047 INFO [trainer.py:765] (3/8) Epoch 28, batch 1400, train_loss[loss=2.934, NarTop10Accuracy=0.7471, over 6048.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 6013.45 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (3/8) Epoch 28, batch 1500, train_loss[loss=3.356, NarTop10Accuracy=0.6472, over 5946.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 5955.23 frames. ], batch size: 51, lr: 3.04e-03 +2024-08-06 20:43:11,080 INFO [trainer.py:765] (3/8) Epoch 28, batch 1600, train_loss[loss=2.965, NarTop10Accuracy=0.7365, over 7086.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5929.50 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:43:37,785 INFO [trainer.py:765] (3/8) Epoch 28, batch 1700, train_loss[loss=2.982, NarTop10Accuracy=0.7231, over 6369.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.709, over 5909.51 frames. 
], batch size: 13, lr: 3.04e-03 +2024-08-06 20:44:04,325 INFO [trainer.py:765] (3/8) Epoch 28, batch 1800, train_loss[loss=2.987, NarTop10Accuracy=0.7325, over 7281.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7096, over 5970.77 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,757 INFO [trainer.py:765] (3/8) Epoch 28, batch 1900, train_loss[loss=3.137, NarTop10Accuracy=0.704, over 6375.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7112, over 6017.99 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,328 INFO [trainer.py:765] (3/8) Epoch 28, batch 2000, train_loss[loss=3.024, NarTop10Accuracy=0.7219, over 6072.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7142, over 6001.41 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:45:21,650 INFO [trainer.py:765] (3/8) Epoch 28, batch 2100, train_loss[loss=2.88, NarTop10Accuracy=0.7402, over 3921.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7151, over 5966.17 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 20:45:47,076 INFO [trainer.py:765] (3/8) Epoch 28, batch 2200, train_loss[loss=2.981, NarTop10Accuracy=0.7332, over 7350.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7123, over 6027.50 frames. ], batch size: 32, lr: 3.03e-03 +2024-08-06 20:46:12,307 INFO [trainer.py:765] (3/8) Epoch 28, batch 2300, train_loss[loss=3.429, NarTop10Accuracy=0.6401, over 5850.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 6021.47 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,807 INFO [trainer.py:765] (3/8) Epoch 28, batch 2400, train_loss[loss=3.025, NarTop10Accuracy=0.7244, over 5244.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7065, over 5790.52 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,594 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (3/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:46:57,081 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,292 INFO [trainer.py:765] (3/8) Epoch 28, batch 2500, train_loss[loss=3.013, NarTop10Accuracy=0.7302, over 5163.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7108, over 5492.31 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,112 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 20:48:21,052 INFO [trainer.py:765] (3/8) Epoch 29, batch 100, train_loss[loss=2.937, NarTop10Accuracy=0.7386, over 6942.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7107, over 2367.96 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,405 INFO [trainer.py:765] (3/8) Epoch 29, batch 200, train_loss[loss=3.38, NarTop10Accuracy=0.6551, over 6825.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.717, over 3850.00 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,476 INFO [trainer.py:765] (3/8) Epoch 29, batch 300, train_loss[loss=3.161, NarTop10Accuracy=0.6886, over 7155.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.718, over 4636.33 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,052 INFO [trainer.py:765] (3/8) Epoch 29, batch 400, train_loss[loss=3.359, NarTop10Accuracy=0.6518, over 5106.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7142, over 5090.86 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,435 INFO [trainer.py:765] (3/8) Epoch 29, batch 500, train_loss[loss=3.037, NarTop10Accuracy=0.7076, over 6126.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5384.93 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,024 INFO [trainer.py:765] (3/8) Epoch 29, batch 600, train_loss[loss=2.722, NarTop10Accuracy=0.7773, over 5829.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7157, over 5654.54 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,677 INFO [trainer.py:765] (3/8) Epoch 29, batch 700, train_loss[loss=2.973, NarTop10Accuracy=0.7256, over 5151.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7107, over 5730.36 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,724 INFO [trainer.py:765] (3/8) Epoch 29, batch 800, train_loss[loss=2.636, NarTop10Accuracy=0.8067, over 4314.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7117, over 5760.30 frames. ], batch size: 5, lr: 2.95e-03 +2024-08-06 20:52:40,743 INFO [trainer.py:765] (3/8) Epoch 29, batch 900, train_loss[loss=2.714, NarTop10Accuracy=0.7883, over 6324.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7107, over 5802.04 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:16,861 INFO [trainer.py:765] (3/8) Epoch 29, batch 1000, train_loss[loss=3.445, NarTop10Accuracy=0.6348, over 6594.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 5894.71 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:52,902 INFO [trainer.py:765] (3/8) Epoch 29, batch 1100, train_loss[loss=3.126, NarTop10Accuracy=0.7055, over 6720.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 5927.13 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,689 INFO [trainer.py:765] (3/8) Epoch 29, batch 1200, train_loss[loss=3.26, NarTop10Accuracy=0.6707, over 6909.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 5933.52 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,427 INFO [trainer.py:765] (3/8) Epoch 29, batch 1300, train_loss[loss=2.836, NarTop10Accuracy=0.7545, over 4257.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7108, over 5986.95 frames. ], batch size: 5, lr: 2.94e-03 +2024-08-06 20:55:32,556 INFO [trainer.py:765] (3/8) Epoch 29, batch 1400, train_loss[loss=3.373, NarTop10Accuracy=0.6548, over 6108.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 6004.12 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,358 INFO [trainer.py:765] (3/8) Epoch 29, batch 1500, train_loss[loss=3.406, NarTop10Accuracy=0.6448, over 6102.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 5951.88 frames. ], batch size: 51, lr: 2.94e-03 +2024-08-06 20:56:32,040 INFO [trainer.py:765] (3/8) Epoch 29, batch 1600, train_loss[loss=3.383, NarTop10Accuracy=0.6544, over 7092.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.709, over 5950.56 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,638 INFO [trainer.py:765] (3/8) Epoch 29, batch 1700, train_loss[loss=2.732, NarTop10Accuracy=0.7787, over 6567.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7104, over 5924.40 frames. ], batch size: 14, lr: 2.93e-03 +2024-08-06 20:57:24,999 INFO [trainer.py:765] (3/8) Epoch 29, batch 1800, train_loss[loss=3.039, NarTop10Accuracy=0.714, over 6972.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7119, over 5984.43 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,620 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (3/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,757 INFO [trainer.py:765] (3/8) Epoch 29, batch 1900, train_loss[loss=2.986, NarTop10Accuracy=0.7327, over 5478.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7078, over 6030.11 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:25,309 INFO [trainer.py:765] (3/8) Epoch 29, batch 2000, train_loss[loss=3.491, NarTop10Accuracy=0.624, over 6447.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 6007.87 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 20:58:50,630 INFO [trainer.py:765] (3/8) Epoch 29, batch 2100, train_loss[loss=2.913, NarTop10Accuracy=0.7424, over 3924.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 5969.67 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 20:59:15,806 INFO [trainer.py:765] (3/8) Epoch 29, batch 2200, train_loss[loss=2.838, NarTop10Accuracy=0.7604, over 6948.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7103, over 6006.90 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,911 INFO [trainer.py:765] (3/8) Epoch 29, batch 2300, train_loss[loss=2.898, NarTop10Accuracy=0.7462, over 5796.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7068, over 6032.80 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,156 INFO [trainer.py:765] (3/8) Epoch 29, batch 2400, train_loss[loss=2.701, NarTop10Accuracy=0.7864, over 5205.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7095, over 5768.41 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (3/8) Epoch 29, batch 2500, train_loss[loss=3.513, NarTop10Accuracy=0.627, over 5112.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7147, over 5453.74 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,499 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 21:01:41,717 INFO [trainer.py:765] (3/8) Epoch 30, batch 100, train_loss[loss=2.855, NarTop10Accuracy=0.7498, over 7551.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7211, over 2360.64 frames. ], batch size: 32, lr: 2.86e-03 +2024-08-06 21:02:17,015 INFO [trainer.py:765] (3/8) Epoch 30, batch 200, train_loss[loss=2.791, NarTop10Accuracy=0.767, over 6798.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.723, over 3843.57 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,343 INFO [trainer.py:765] (3/8) Epoch 30, batch 300, train_loss[loss=2.947, NarTop10Accuracy=0.7362, over 7137.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7246, over 4649.71 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,643 INFO [trainer.py:765] (3/8) Epoch 30, batch 400, train_loss[loss=2.703, NarTop10Accuracy=0.7809, over 5049.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7197, over 5090.84 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,546 INFO [trainer.py:765] (3/8) Epoch 30, batch 500, train_loss[loss=3.371, NarTop10Accuracy=0.6534, over 6093.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 5391.83 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,658 INFO [trainer.py:765] (3/8) Epoch 30, batch 600, train_loss[loss=2.875, NarTop10Accuracy=0.7455, over 5679.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7162, over 5668.94 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,526 INFO [trainer.py:765] (3/8) Epoch 30, batch 700, train_loss[loss=2.919, NarTop10Accuracy=0.7404, over 5106.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 5748.87 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,132 INFO [trainer.py:765] (3/8) Epoch 30, batch 800, train_loss[loss=2.966, NarTop10Accuracy=0.7288, over 5103.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7195, over 5806.89 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,844 INFO [trainer.py:765] (3/8) Epoch 30, batch 900, train_loss[loss=2.881, NarTop10Accuracy=0.7532, over 6231.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5790.92 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (3/8) Epoch 30, batch 1000, train_loss[loss=2.97, NarTop10Accuracy=0.7356, over 6666.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7126, over 5907.62 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:07:25,937 INFO [trainer.py:765] (3/8) Epoch 30, batch 1100, train_loss[loss=3.389, NarTop10Accuracy=0.6387, over 6747.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7097, over 5926.48 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,381 INFO [trainer.py:765] (3/8) Epoch 30, batch 1200, train_loss[loss=2.895, NarTop10Accuracy=0.7464, over 7401.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7121, over 5917.38 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,371 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (3/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 21:08:44,198 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (3/8) Epoch 30, batch 1300, train_loss[loss=3.335, NarTop10Accuracy=0.6647, over 4299.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 5976.26 frames. ], batch size: 5, lr: 2.84e-03 +2024-08-06 21:09:22,398 INFO [trainer.py:765] (3/8) Epoch 30, batch 1400, train_loss[loss=2.857, NarTop10Accuracy=0.7454, over 6093.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7118, over 6002.92 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,372 INFO [trainer.py:765] (3/8) Epoch 30, batch 1500, train_loss[loss=3.04, NarTop10Accuracy=0.7219, over 6684.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7124, over 5941.78 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (3/8) Epoch 30, batch 1600, train_loss[loss=3.076, NarTop10Accuracy=0.7081, over 6951.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.712, over 5929.47 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,678 INFO [trainer.py:765] (3/8) Epoch 30, batch 1700, train_loss[loss=3.032, NarTop10Accuracy=0.7122, over 6216.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7114, over 5922.01 frames. 
], batch size: 13, lr: 2.83e-03 +2024-08-06 21:11:13,058 INFO [trainer.py:765] (3/8) Epoch 30, batch 1800, train_loss[loss=3.51, NarTop10Accuracy=0.6198, over 7095.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7106, over 5977.62 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,418 INFO [trainer.py:765] (3/8) Epoch 30, batch 1900, train_loss[loss=3.085, NarTop10Accuracy=0.7118, over 6552.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 6036.69 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,826 INFO [trainer.py:765] (3/8) Epoch 30, batch 2000, train_loss[loss=3.361, NarTop10Accuracy=0.6489, over 5718.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 5982.96 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:30,088 INFO [trainer.py:765] (3/8) Epoch 30, batch 2100, train_loss[loss=2.93, NarTop10Accuracy=0.7305, over 3891.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5969.57 frames. ], batch size: 4, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (3/8) Epoch 30, batch 2200, train_loss[loss=2.979, NarTop10Accuracy=0.7309, over 7350.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 6005.77 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,296 INFO [trainer.py:765] (3/8) Epoch 30, batch 2300, train_loss[loss=2.757, NarTop10Accuracy=0.7693, over 5754.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7071, over 6024.98 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,491 INFO [trainer.py:765] (3/8) Epoch 30, batch 2400, train_loss[loss=2.905, NarTop10Accuracy=0.7616, over 5157.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7148, over 5794.32 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,987 INFO [trainer.py:765] (3/8) Epoch 30, batch 2500, train_loss[loss=2.985, NarTop10Accuracy=0.7333, over 5283.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7164, over 5473.88 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,770 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 21:15:23,632 INFO [trainer.py:765] (3/8) Epoch 31, batch 100, train_loss[loss=3.417, NarTop10Accuracy=0.6378, over 7092.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.713, over 2361.72 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,127 INFO [trainer.py:765] (3/8) Epoch 31, batch 200, train_loss[loss=2.93, NarTop10Accuracy=0.7455, over 6867.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7182, over 3861.68 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,215 INFO [trainer.py:765] (3/8) Epoch 31, batch 300, train_loss[loss=3.013, NarTop10Accuracy=0.7321, over 7284.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7183, over 4684.30 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (3/8) Epoch 31, batch 400, train_loss[loss=3.39, NarTop10Accuracy=0.6506, over 5238.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7147, over 5131.93 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,724 INFO [trainer.py:765] (3/8) Epoch 31, batch 500, train_loss[loss=2.812, NarTop10Accuracy=0.7583, over 6171.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7167, over 5406.19 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (3/8) Epoch 31, batch 600, train_loss[loss=2.643, NarTop10Accuracy=0.7997, over 5709.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7142, over 5658.07 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,609 INFO [trainer.py:765] (3/8) Epoch 31, batch 700, train_loss[loss=3.214, NarTop10Accuracy=0.6772, over 5064.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5736.77 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,094 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (3/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,276 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,245 INFO [trainer.py:765] (3/8) Epoch 31, batch 800, train_loss[loss=2.646, NarTop10Accuracy=0.7929, over 4932.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7152, over 5786.21 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:19:56,950 INFO [trainer.py:765] (3/8) Epoch 31, batch 900, train_loss[loss=3.415, NarTop10Accuracy=0.6453, over 6240.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7157, over 5796.23 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,310 INFO [trainer.py:765] (3/8) Epoch 31, batch 1000, train_loss[loss=3.432, NarTop10Accuracy=0.6329, over 6141.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5887.74 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,215 INFO [trainer.py:765] (3/8) Epoch 31, batch 1100, train_loss[loss=3.231, NarTop10Accuracy=0.682, over 6681.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5926.49 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,118 INFO [trainer.py:765] (3/8) Epoch 31, batch 1200, train_loss[loss=2.932, NarTop10Accuracy=0.7406, over 7287.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5905.92 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,741 INFO [trainer.py:765] (3/8) Epoch 31, batch 1300, train_loss[loss=2.901, NarTop10Accuracy=0.7496, over 5052.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7124, over 5983.62 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 21:22:53,533 INFO [trainer.py:765] (3/8) Epoch 31, batch 1400, train_loss[loss=2.876, NarTop10Accuracy=0.7462, over 6159.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7107, over 6013.83 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (3/8) Epoch 31, batch 1500, train_loss[loss=3.344, NarTop10Accuracy=0.6599, over 6234.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7142, over 5951.40 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:23:49,004 INFO [trainer.py:765] (3/8) Epoch 31, batch 1600, train_loss[loss=3.369, NarTop10Accuracy=0.6478, over 7155.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5931.60 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,511 INFO [trainer.py:765] (3/8) Epoch 31, batch 1700, train_loss[loss=3.433, NarTop10Accuracy=0.6337, over 6606.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5910.24 frames. ], batch size: 14, lr: 2.74e-03 +2024-08-06 21:24:41,995 INFO [trainer.py:765] (3/8) Epoch 31, batch 1800, train_loss[loss=2.887, NarTop10Accuracy=0.7479, over 7080.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 5981.96 frames. 
], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,356 INFO [trainer.py:765] (3/8) Epoch 31, batch 1900, train_loss[loss=3.286, NarTop10Accuracy=0.6695, over 6060.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7127, over 6034.13 frames. ], batch size: 52, lr: 2.74e-03 +2024-08-06 21:25:33,772 INFO [trainer.py:765] (3/8) Epoch 31, batch 2000, train_loss[loss=3.018, NarTop10Accuracy=0.7265, over 5685.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 6008.82 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:59,106 INFO [trainer.py:765] (3/8) Epoch 31, batch 2100, train_loss[loss=2.677, NarTop10Accuracy=0.7788, over 4878.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7146, over 5981.22 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 21:26:24,237 INFO [trainer.py:765] (3/8) Epoch 31, batch 2200, train_loss[loss=2.976, NarTop10Accuracy=0.7299, over 7113.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7169, over 6008.32 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,321 INFO [trainer.py:765] (3/8) Epoch 31, batch 2300, train_loss[loss=2.762, NarTop10Accuracy=0.7719, over 5607.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7143, over 6013.79 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,607 INFO [trainer.py:765] (3/8) Epoch 31, batch 2400, train_loss[loss=2.85, NarTop10Accuracy=0.7605, over 5115.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5776.50 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (3/8) Epoch 31, batch 2500, train_loss[loss=2.836, NarTop10Accuracy=0.7554, over 5067.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7166, over 5484.81 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:57,049 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 21:28:49,393 INFO [trainer.py:765] (3/8) Epoch 32, batch 100, train_loss[loss=2.908, NarTop10Accuracy=0.7438, over 7341.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7132, over 2369.47 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,161 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (3/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,272 INFO [trainer.py:765] (3/8) Epoch 32, batch 200, train_loss[loss=3.286, NarTop10Accuracy=0.6734, over 6699.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 3864.95 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,278 INFO [trainer.py:765] (3/8) Epoch 32, batch 300, train_loss[loss=3.034, NarTop10Accuracy=0.7196, over 7098.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7145, over 4653.46 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (3/8) Epoch 32, batch 400, train_loss[loss=2.83, NarTop10Accuracy=0.7579, over 5103.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7113, over 5105.33 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,530 INFO [trainer.py:765] (3/8) Epoch 32, batch 500, train_loss[loss=3.016, NarTop10Accuracy=0.7242, over 6150.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.714, over 5374.89 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,486 INFO [trainer.py:765] (3/8) Epoch 32, batch 600, train_loss[loss=3.204, NarTop10Accuracy=0.6781, over 5712.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5637.35 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,028 INFO [trainer.py:765] (3/8) Epoch 32, batch 700, train_loss[loss=2.833, NarTop10Accuracy=0.7585, over 4203.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7167, over 5705.64 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:00,646 INFO [trainer.py:765] (3/8) Epoch 32, batch 800, train_loss[loss=3.178, NarTop10Accuracy=0.6794, over 4917.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 5765.19 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:28,991 INFO [trainer.py:765] (3/8) Epoch 32, batch 900, train_loss[loss=2.851, NarTop10Accuracy=0.7526, over 6267.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7188, over 5785.78 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,049 INFO [trainer.py:765] (3/8) Epoch 32, batch 1000, train_loss[loss=3.221, NarTop10Accuracy=0.6812, over 6546.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7171, over 5888.33 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (3/8) Epoch 32, batch 1100, train_loss[loss=3.24, NarTop10Accuracy=0.6771, over 6771.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7156, over 5935.38 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (3/8) Epoch 32, batch 1200, train_loss[loss=3.194, NarTop10Accuracy=0.6836, over 7239.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7147, over 5915.38 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 21:35:52,800 INFO [trainer.py:765] (3/8) Epoch 32, batch 1300, train_loss[loss=3.235, NarTop10Accuracy=0.6801, over 5013.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7146, over 5982.23 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,478 INFO [trainer.py:765] (3/8) Epoch 32, batch 1400, train_loss[loss=3.328, NarTop10Accuracy=0.647, over 6072.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 6008.34 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,733 INFO [trainer.py:765] (3/8) Epoch 32, batch 1500, train_loss[loss=3.453, NarTop10Accuracy=0.638, over 5919.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.714, over 5952.38 frames. ], batch size: 50, lr: 2.66e-03 +2024-08-06 21:37:32,521 INFO [trainer.py:765] (3/8) Epoch 32, batch 1600, train_loss[loss=3.006, NarTop10Accuracy=0.7266, over 7383.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7149, over 5929.47 frames. ], batch size: 23, lr: 2.66e-03 +2024-08-06 21:37:59,159 INFO [trainer.py:765] (3/8) Epoch 32, batch 1700, train_loss[loss=3.033, NarTop10Accuracy=0.7161, over 6681.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7159, over 5923.32 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 21:38:25,702 INFO [trainer.py:765] (3/8) Epoch 32, batch 1800, train_loss[loss=3.097, NarTop10Accuracy=0.7073, over 7050.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 5992.44 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,168 INFO [trainer.py:765] (3/8) Epoch 32, batch 1900, train_loss[loss=3.08, NarTop10Accuracy=0.7097, over 6192.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7107, over 6021.10 frames. 
], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:17,768 INFO [trainer.py:765] (3/8) Epoch 32, batch 2000, train_loss[loss=3.481, NarTop10Accuracy=0.6273, over 6060.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7127, over 5988.00 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (3/8) Epoch 32, batch 2100, train_loss[loss=2.696, NarTop10Accuracy=0.7785, over 3957.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 5964.62 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 21:39:54,781 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 21:40:02,942 INFO [trainer.py:811] (3/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,628 INFO [trainer.py:765] (3/8) Epoch 32, batch 2200, train_loss[loss=3.148, NarTop10Accuracy=0.698, over 7320.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7137, over 6005.61 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (3/8) Epoch 32, batch 2300, train_loss[loss=3.284, NarTop10Accuracy=0.673, over 5706.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 6009.44 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (3/8) Epoch 32, batch 2400, train_loss[loss=3.176, NarTop10Accuracy=0.6897, over 5172.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7128, over 5773.38 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,537 INFO [trainer.py:765] (3/8) Epoch 32, batch 2500, train_loss[loss=2.888, NarTop10Accuracy=0.7554, over 5016.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7194, over 5483.78 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,758 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 21:42:47,615 INFO [trainer.py:765] (3/8) Epoch 33, batch 100, train_loss[loss=2.97, NarTop10Accuracy=0.7325, over 7131.00 frames. ], tot_loss[loss=2.998, NarTop10Accuracy=0.726, over 2356.74 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 21:43:22,367 INFO [trainer.py:765] (3/8) Epoch 33, batch 200, train_loss[loss=2.764, NarTop10Accuracy=0.7649, over 6792.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 3857.76 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,512 INFO [trainer.py:765] (3/8) Epoch 33, batch 300, train_loss[loss=3.344, NarTop10Accuracy=0.6547, over 7206.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.717, over 4656.21 frames. ], batch size: 23, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (3/8) Epoch 33, batch 400, train_loss[loss=2.704, NarTop10Accuracy=0.7865, over 5121.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5109.34 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,869 INFO [trainer.py:765] (3/8) Epoch 33, batch 500, train_loss[loss=2.648, NarTop10Accuracy=0.7939, over 6162.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 5391.70 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (3/8) Epoch 33, batch 600, train_loss[loss=3.52, NarTop10Accuracy=0.6273, over 5721.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7146, over 5659.13 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,316 INFO [trainer.py:765] (3/8) Epoch 33, batch 700, train_loss[loss=2.758, NarTop10Accuracy=0.7699, over 5097.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7144, over 5712.84 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (3/8) Epoch 33, batch 800, train_loss[loss=2.699, NarTop10Accuracy=0.7868, over 4947.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7156, over 5780.59 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,907 INFO [trainer.py:765] (3/8) Epoch 33, batch 900, train_loss[loss=3.192, NarTop10Accuracy=0.6882, over 6300.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.715, over 5819.60 frames. ], batch size: 13, lr: 2.59e-03 +2024-08-06 21:47:57,315 INFO [trainer.py:765] (3/8) Epoch 33, batch 1000, train_loss[loss=2.803, NarTop10Accuracy=0.7604, over 6210.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7145, over 5906.58 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (3/8) Epoch 33, batch 1100, train_loss[loss=2.778, NarTop10Accuracy=0.7605, over 6753.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7112, over 5943.00 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,659 INFO [trainer.py:765] (3/8) Epoch 33, batch 1200, train_loss[loss=2.795, NarTop10Accuracy=0.7676, over 7170.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.713, over 5927.83 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,815 INFO [trainer.py:765] (3/8) Epoch 33, batch 1300, train_loss[loss=2.913, NarTop10Accuracy=0.7439, over 4323.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7151, over 5987.56 frames. ], batch size: 5, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (3/8) Epoch 33, batch 1400, train_loss[loss=3.228, NarTop10Accuracy=0.6741, over 6120.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7136, over 6010.01 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (3/8) Epoch 33, batch 1500, train_loss[loss=3.1, NarTop10Accuracy=0.704, over 6021.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.715, over 5944.83 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,607 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (3/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 21:51:13,181 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (3/8) Epoch 33, batch 1600, train_loss[loss=3.151, NarTop10Accuracy=0.6961, over 7104.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7165, over 5929.91 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (3/8) Epoch 33, batch 1700, train_loss[loss=2.795, NarTop10Accuracy=0.765, over 6519.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5921.31 frames. ], batch size: 14, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (3/8) Epoch 33, batch 1800, train_loss[loss=2.812, NarTop10Accuracy=0.7637, over 7164.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5994.43 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (3/8) Epoch 33, batch 1900, train_loss[loss=3.375, NarTop10Accuracy=0.6436, over 6849.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7124, over 6035.06 frames. ], batch size: 51, lr: 2.57e-03 +2024-08-06 21:53:06,353 INFO [trainer.py:765] (3/8) Epoch 33, batch 2000, train_loss[loss=3.444, NarTop10Accuracy=0.6278, over 5583.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 6015.67 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,659 INFO [trainer.py:765] (3/8) Epoch 33, batch 2100, train_loss[loss=3.467, NarTop10Accuracy=0.6335, over 4683.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7155, over 5994.96 frames. ], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,891 INFO [trainer.py:765] (3/8) Epoch 33, batch 2200, train_loss[loss=3.399, NarTop10Accuracy=0.6471, over 7290.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.714, over 6020.72 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (3/8) Epoch 33, batch 2300, train_loss[loss=2.638, NarTop10Accuracy=0.7976, over 5751.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7149, over 6022.08 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,430 INFO [trainer.py:765] (3/8) Epoch 33, batch 2400, train_loss[loss=2.829, NarTop10Accuracy=0.7535, over 5127.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7176, over 5783.21 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (3/8) Epoch 33, batch 2500, train_loss[loss=2.727, NarTop10Accuracy=0.785, over 5124.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7221, over 5489.68 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,643 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (3/8) Epoch 34, batch 100, train_loss[loss=3.454, NarTop10Accuracy=0.6313, over 7134.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.718, over 2359.05 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (3/8) Epoch 34, batch 200, train_loss[loss=3.158, NarTop10Accuracy=0.6979, over 7053.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7213, over 3835.73 frames. ], batch size: 18, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (3/8) Epoch 34, batch 300, train_loss[loss=2.765, NarTop10Accuracy=0.765, over 7212.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7179, over 4642.06 frames. ], batch size: 23, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (3/8) Epoch 34, batch 400, train_loss[loss=3.166, NarTop10Accuracy=0.6882, over 5157.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7209, over 5089.76 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,690 INFO [trainer.py:765] (3/8) Epoch 34, batch 500, train_loss[loss=3.263, NarTop10Accuracy=0.677, over 6018.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7192, over 5366.83 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (3/8) Epoch 34, batch 600, train_loss[loss=2.893, NarTop10Accuracy=0.7473, over 5733.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7187, over 5632.97 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (3/8) Epoch 34, batch 700, train_loss[loss=3.025, NarTop10Accuracy=0.7204, over 5037.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7174, over 5714.60 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (3/8) Epoch 34, batch 800, train_loss[loss=3.031, NarTop10Accuracy=0.7133, over 4998.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 5767.68 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (3/8) Epoch 34, batch 900, train_loss[loss=2.884, NarTop10Accuracy=0.7439, over 6210.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5803.86 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (3/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:01:34,091 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (3/8) Epoch 34, batch 1000, train_loss[loss=3.261, NarTop10Accuracy=0.6726, over 6297.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7182, over 5901.79 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (3/8) Epoch 34, batch 1100, train_loss[loss=3.405, NarTop10Accuracy=0.6424, over 6849.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7165, over 5942.98 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (3/8) Epoch 34, batch 1200, train_loss[loss=2.848, NarTop10Accuracy=0.7607, over 6897.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 5931.63 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,813 INFO [trainer.py:765] (3/8) Epoch 34, batch 1300, train_loss[loss=2.68, NarTop10Accuracy=0.7953, over 5004.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7169, over 5999.74 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,949 INFO [trainer.py:765] (3/8) Epoch 34, batch 1400, train_loss[loss=3.287, NarTop10Accuracy=0.6653, over 5946.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 6041.01 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (3/8) Epoch 34, batch 1500, train_loss[loss=3.061, NarTop10Accuracy=0.7237, over 5730.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5944.58 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,599 INFO [trainer.py:765] (3/8) Epoch 34, batch 1600, train_loss[loss=2.937, NarTop10Accuracy=0.7306, over 7143.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5920.51 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (3/8) Epoch 34, batch 1700, train_loss[loss=3.153, NarTop10Accuracy=0.693, over 6315.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 5921.11 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 22:05:41,720 INFO [trainer.py:765] (3/8) Epoch 34, batch 1800, train_loss[loss=3.133, NarTop10Accuracy=0.6965, over 7101.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7159, over 5994.05 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,206 INFO [trainer.py:765] (3/8) Epoch 34, batch 1900, train_loss[loss=3.121, NarTop10Accuracy=0.7116, over 6288.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7118, over 6025.04 frames. 
], batch size: 51, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (3/8) Epoch 34, batch 2000, train_loss[loss=3.097, NarTop10Accuracy=0.7064, over 6084.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7149, over 5987.62 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (3/8) Epoch 34, batch 2100, train_loss[loss=2.979, NarTop10Accuracy=0.7187, over 4794.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7121, over 5980.19 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (3/8) Epoch 34, batch 2200, train_loss[loss=2.855, NarTop10Accuracy=0.7548, over 7242.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7125, over 6014.17 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (3/8) Epoch 34, batch 2300, train_loss[loss=2.675, NarTop10Accuracy=0.789, over 5631.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7124, over 6012.87 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (3/8) Epoch 34, batch 2400, train_loss[loss=3.288, NarTop10Accuracy=0.6646, over 5073.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7144, over 5759.04 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (3/8) Epoch 34, batch 2500, train_loss[loss=2.838, NarTop10Accuracy=0.7673, over 5112.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.72, over 5480.55 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,803 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (3/8) Epoch 35, batch 100, train_loss[loss=2.926, NarTop10Accuracy=0.7424, over 7407.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7159, over 2374.82 frames. ], batch size: 31, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (3/8) Epoch 35, batch 200, train_loss[loss=3.141, NarTop10Accuracy=0.6962, over 6615.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7153, over 3857.48 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (3/8) Epoch 35, batch 300, train_loss[loss=2.792, NarTop10Accuracy=0.7637, over 7029.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7193, over 4663.44 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (3/8) Epoch 35, batch 400, train_loss[loss=2.964, NarTop10Accuracy=0.7347, over 5016.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7193, over 5109.00 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,048 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (3/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (3/8) Epoch 35, batch 500, train_loss[loss=2.813, NarTop10Accuracy=0.7635, over 6177.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7216, over 5395.12 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,424 INFO [trainer.py:765] (3/8) Epoch 35, batch 600, train_loss[loss=3.092, NarTop10Accuracy=0.7124, over 5676.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5654.18 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (3/8) Epoch 35, batch 700, train_loss[loss=2.556, NarTop10Accuracy=0.8049, over 5118.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.719, over 5734.25 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,383 INFO [trainer.py:765] (3/8) Epoch 35, batch 800, train_loss[loss=2.723, NarTop10Accuracy=0.7817, over 4446.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7177, over 5798.06 frames. ], batch size: 5, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (3/8) Epoch 35, batch 900, train_loss[loss=3.096, NarTop10Accuracy=0.7028, over 6636.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5800.45 frames. ], batch size: 14, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (3/8) Epoch 35, batch 1000, train_loss[loss=2.853, NarTop10Accuracy=0.7504, over 6675.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.717, over 5896.79 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (3/8) Epoch 35, batch 1100, train_loss[loss=3.044, NarTop10Accuracy=0.7139, over 6765.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5908.57 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,483 INFO [trainer.py:765] (3/8) Epoch 35, batch 1200, train_loss[loss=2.897, NarTop10Accuracy=0.7468, over 7509.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7184, over 5923.09 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (3/8) Epoch 35, batch 1300, train_loss[loss=2.74, NarTop10Accuracy=0.765, over 4959.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7207, over 5989.76 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,061 INFO [trainer.py:765] (3/8) Epoch 35, batch 1400, train_loss[loss=3.05, NarTop10Accuracy=0.7214, over 5964.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7182, over 6022.34 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (3/8) Epoch 35, batch 1500, train_loss[loss=3.072, NarTop10Accuracy=0.7161, over 6111.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7175, over 5949.37 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (3/8) Epoch 35, batch 1600, train_loss[loss=2.909, NarTop10Accuracy=0.7393, over 7026.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7154, over 5932.89 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,319 INFO [trainer.py:765] (3/8) Epoch 35, batch 1700, train_loss[loss=2.853, NarTop10Accuracy=0.7642, over 6687.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7138, over 5931.41 frames. ], batch size: 14, lr: 2.42e-03 +2024-08-06 22:19:23,703 INFO [trainer.py:765] (3/8) Epoch 35, batch 1800, train_loss[loss=3.4, NarTop10Accuracy=0.6455, over 7218.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 5975.41 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (3/8) Epoch 35, batch 1900, train_loss[loss=3.095, NarTop10Accuracy=0.708, over 6252.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 6015.71 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (3/8) Epoch 35, batch 2000, train_loss[loss=3.05, NarTop10Accuracy=0.7152, over 6651.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7179, over 5997.75 frames. 
], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (3/8) Epoch 35, batch 2100, train_loss[loss=2.759, NarTop10Accuracy=0.7605, over 3852.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7163, over 5965.82 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (3/8) Epoch 35, batch 2200, train_loss[loss=2.906, NarTop10Accuracy=0.7463, over 7422.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 6011.65 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (3/8) Epoch 35, batch 2300, train_loss[loss=2.981, NarTop10Accuracy=0.7347, over 6174.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7155, over 6015.05 frames. ], batch size: 10, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (3/8) Epoch 35, batch 2400, train_loss[loss=3.398, NarTop10Accuracy=0.6478, over 5133.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5760.56 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,682 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (3/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,127 INFO [trainer.py:765] (3/8) Epoch 35, batch 2500, train_loss[loss=2.983, NarTop10Accuracy=0.725, over 5004.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7216, over 5465.53 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:47,018 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 22:23:47,172 INFO [trainer.py:765] (3/8) Epoch 36, batch 100, train_loss[loss=3.177, NarTop10Accuracy=0.6969, over 7434.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7266, over 2354.14 frames. ], batch size: 33, lr: 2.38e-03 +2024-08-06 22:24:22,495 INFO [trainer.py:765] (3/8) Epoch 36, batch 200, train_loss[loss=2.836, NarTop10Accuracy=0.762, over 6810.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7196, over 3869.39 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (3/8) Epoch 36, batch 300, train_loss[loss=3.126, NarTop10Accuracy=0.6947, over 6969.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7178, over 4683.84 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,276 INFO [trainer.py:765] (3/8) Epoch 36, batch 400, train_loss[loss=2.85, NarTop10Accuracy=0.7556, over 5184.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7219, over 5128.47 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,819 INFO [trainer.py:765] (3/8) Epoch 36, batch 500, train_loss[loss=3.411, NarTop10Accuracy=0.6384, over 6576.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 5396.81 frames. ], batch size: 12, lr: 2.37e-03 +2024-08-06 22:26:35,026 INFO [trainer.py:765] (3/8) Epoch 36, batch 600, train_loss[loss=3.08, NarTop10Accuracy=0.7183, over 6141.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7216, over 5643.71 frames. ], batch size: 10, lr: 2.37e-03 +2024-08-06 22:27:10,991 INFO [trainer.py:765] (3/8) Epoch 36, batch 700, train_loss[loss=3.194, NarTop10Accuracy=0.6813, over 5043.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7218, over 5715.95 frames. 
], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,915 INFO [trainer.py:765] (3/8) Epoch 36, batch 800, train_loss[loss=3.344, NarTop10Accuracy=0.6523, over 4254.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7181, over 5768.29 frames. ], batch size: 5, lr: 2.37e-03 +2024-08-06 22:28:17,813 INFO [trainer.py:765] (3/8) Epoch 36, batch 900, train_loss[loss=2.818, NarTop10Accuracy=0.7668, over 6237.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7209, over 5800.07 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:28:56,984 INFO [trainer.py:765] (3/8) Epoch 36, batch 1000, train_loss[loss=3.332, NarTop10Accuracy=0.6552, over 6231.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7202, over 5902.25 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:29:29,365 INFO [trainer.py:765] (3/8) Epoch 36, batch 1100, train_loss[loss=2.784, NarTop10Accuracy=0.7695, over 6897.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7192, over 5932.55 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,681 INFO [trainer.py:765] (3/8) Epoch 36, batch 1200, train_loss[loss=3.115, NarTop10Accuracy=0.7089, over 7302.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7199, over 5931.15 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,576 INFO [trainer.py:765] (3/8) Epoch 36, batch 1300, train_loss[loss=2.665, NarTop10Accuracy=0.7836, over 5214.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.719, over 5990.01 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,939 INFO [trainer.py:765] (3/8) Epoch 36, batch 1400, train_loss[loss=3.072, NarTop10Accuracy=0.697, over 6003.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 6010.62 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,749 INFO [trainer.py:765] (3/8) Epoch 36, batch 1500, train_loss[loss=3.316, NarTop10Accuracy=0.6648, over 6456.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5963.09 frames. ], batch size: 51, lr: 2.36e-03 +2024-08-06 22:32:11,460 INFO [trainer.py:765] (3/8) Epoch 36, batch 1600, train_loss[loss=3.411, NarTop10Accuracy=0.6454, over 7140.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7198, over 5936.21 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,109 INFO [trainer.py:765] (3/8) Epoch 36, batch 1700, train_loss[loss=3.35, NarTop10Accuracy=0.6567, over 6321.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7176, over 5929.35 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,555 INFO [trainer.py:765] (3/8) Epoch 36, batch 1800, train_loss[loss=3.152, NarTop10Accuracy=0.687, over 7035.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7184, over 5992.42 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,171 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (3/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (3/8) Epoch 36, batch 1900, train_loss[loss=3.036, NarTop10Accuracy=0.7226, over 6114.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7176, over 6029.69 frames. 
], batch size: 51, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (3/8) Epoch 36, batch 2000, train_loss[loss=3.173, NarTop10Accuracy=0.7014, over 5688.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 5996.88 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (3/8) Epoch 36, batch 2100, train_loss[loss=2.864, NarTop10Accuracy=0.7565, over 4731.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 5963.20 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (3/8) Epoch 36, batch 2200, train_loss[loss=3.324, NarTop10Accuracy=0.6501, over 7215.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7154, over 5995.51 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (3/8) Epoch 36, batch 2300, train_loss[loss=3.4, NarTop10Accuracy=0.6535, over 5748.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 6011.78 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,601 INFO [trainer.py:765] (3/8) Epoch 36, batch 2400, train_loss[loss=3.296, NarTop10Accuracy=0.6601, over 5091.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5793.41 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,183 INFO [trainer.py:765] (3/8) Epoch 36, batch 2500, train_loss[loss=2.802, NarTop10Accuracy=0.7617, over 5079.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7211, over 5496.09 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:29,223 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 22:37:29,726 INFO [trainer.py:765] (3/8) Epoch 37, batch 100, train_loss[loss=2.821, NarTop10Accuracy=0.7546, over 7305.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7145, over 2349.49 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,273 INFO [trainer.py:765] (3/8) Epoch 37, batch 200, train_loss[loss=2.801, NarTop10Accuracy=0.7785, over 6942.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7204, over 3838.30 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (3/8) Epoch 37, batch 300, train_loss[loss=3.159, NarTop10Accuracy=0.6963, over 6948.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7212, over 4626.07 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,307 INFO [trainer.py:765] (3/8) Epoch 37, batch 400, train_loss[loss=2.599, NarTop10Accuracy=0.8066, over 5049.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7242, over 5072.69 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,862 INFO [trainer.py:765] (3/8) Epoch 37, batch 500, train_loss[loss=3.476, NarTop10Accuracy=0.6248, over 6078.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7236, over 5362.96 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,334 INFO [trainer.py:765] (3/8) Epoch 37, batch 600, train_loss[loss=2.747, NarTop10Accuracy=0.786, over 5778.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7224, over 5634.00 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,616 INFO [trainer.py:765] (3/8) Epoch 37, batch 700, train_loss[loss=3.098, NarTop10Accuracy=0.7075, over 5148.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 5722.40 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:30,565 INFO [trainer.py:765] (3/8) Epoch 37, batch 800, train_loss[loss=2.742, NarTop10Accuracy=0.7852, over 4977.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7179, over 5779.95 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,084 INFO [trainer.py:765] (3/8) Epoch 37, batch 900, train_loss[loss=2.994, NarTop10Accuracy=0.7302, over 6297.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7208, over 5791.03 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:42:38,268 INFO [trainer.py:765] (3/8) Epoch 37, batch 1000, train_loss[loss=3.133, NarTop10Accuracy=0.7034, over 6573.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7181, over 5893.55 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (3/8) Epoch 37, batch 1100, train_loss[loss=3.074, NarTop10Accuracy=0.7159, over 6801.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5909.87 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (3/8) Epoch 37, batch 1200, train_loss[loss=2.917, NarTop10Accuracy=0.7538, over 7443.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5907.39 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,755 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (3/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,784 INFO [trainer.py:765] (3/8) Epoch 37, batch 1300, train_loss[loss=2.789, NarTop10Accuracy=0.7732, over 5082.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7196, over 5972.93 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (3/8) Epoch 37, batch 1400, train_loss[loss=2.661, NarTop10Accuracy=0.8055, over 6150.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7206, over 6033.85 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,513 INFO [trainer.py:765] (3/8) Epoch 37, batch 1500, train_loss[loss=2.971, NarTop10Accuracy=0.7399, over 5982.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5961.09 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:46:08,438 INFO [trainer.py:765] (3/8) Epoch 37, batch 1600, train_loss[loss=3.456, NarTop10Accuracy=0.6334, over 7146.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7161, over 5937.96 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,187 INFO [trainer.py:765] (3/8) Epoch 37, batch 1700, train_loss[loss=3.343, NarTop10Accuracy=0.6477, over 6714.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 5930.49 frames. ], batch size: 14, lr: 2.29e-03 +2024-08-06 22:47:01,793 INFO [trainer.py:765] (3/8) Epoch 37, batch 1800, train_loss[loss=2.764, NarTop10Accuracy=0.7753, over 7365.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7185, over 5995.96 frames. ], batch size: 23, lr: 2.29e-03 +2024-08-06 22:47:28,312 INFO [trainer.py:765] (3/8) Epoch 37, batch 1900, train_loss[loss=3.055, NarTop10Accuracy=0.717, over 5916.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 6013.54 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (3/8) Epoch 37, batch 2000, train_loss[loss=3.233, NarTop10Accuracy=0.6655, over 5985.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 5974.69 frames. 
], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,325 INFO [trainer.py:765] (3/8) Epoch 37, batch 2100, train_loss[loss=2.87, NarTop10Accuracy=0.7457, over 3948.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.718, over 5956.77 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 22:48:44,708 INFO [trainer.py:765] (3/8) Epoch 37, batch 2200, train_loss[loss=2.941, NarTop10Accuracy=0.736, over 7407.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 6015.62 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,913 INFO [trainer.py:765] (3/8) Epoch 37, batch 2300, train_loss[loss=2.787, NarTop10Accuracy=0.7806, over 5853.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.716, over 6024.07 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,318 INFO [trainer.py:765] (3/8) Epoch 37, batch 2400, train_loss[loss=3.272, NarTop10Accuracy=0.6803, over 5229.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7195, over 5773.10 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,860 INFO [trainer.py:765] (3/8) Epoch 37, batch 2500, train_loss[loss=3.28, NarTop10Accuracy=0.6624, over 5058.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7261, over 5448.22 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:18,339 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 22:51:16,152 INFO [trainer.py:765] (3/8) Epoch 38, batch 100, train_loss[loss=2.966, NarTop10Accuracy=0.7248, over 7281.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7215, over 2362.53 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,015 INFO [trainer.py:765] (3/8) Epoch 38, batch 200, train_loss[loss=3.234, NarTop10Accuracy=0.6833, over 6825.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7206, over 3843.40 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,203 INFO [trainer.py:765] (3/8) Epoch 38, batch 300, train_loss[loss=2.913, NarTop10Accuracy=0.7453, over 7080.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7185, over 4643.94 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (3/8) Epoch 38, batch 400, train_loss[loss=3.194, NarTop10Accuracy=0.6793, over 5151.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7201, over 5091.59 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,229 INFO [trainer.py:765] (3/8) Epoch 38, batch 500, train_loss[loss=2.811, NarTop10Accuracy=0.7723, over 6072.00 frames. ], tot_loss[loss=2.988, NarTop10Accuracy=0.7275, over 5384.22 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,498 INFO [trainer.py:765] (3/8) Epoch 38, batch 600, train_loss[loss=3.205, NarTop10Accuracy=0.6781, over 5781.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.725, over 5655.67 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,004 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (3/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,659 INFO [trainer.py:765] (3/8) Epoch 38, batch 700, train_loss[loss=2.749, NarTop10Accuracy=0.7673, over 5067.00 frames. ], tot_loss[loss=3.005, NarTop10Accuracy=0.7242, over 5706.18 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,938 INFO [trainer.py:765] (3/8) Epoch 38, batch 800, train_loss[loss=2.934, NarTop10Accuracy=0.7355, over 5040.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7209, over 5759.03 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,704 INFO [trainer.py:765] (3/8) Epoch 38, batch 900, train_loss[loss=2.834, NarTop10Accuracy=0.7597, over 6669.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7214, over 5779.56 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:56:32,091 INFO [trainer.py:765] (3/8) Epoch 38, batch 1000, train_loss[loss=3.241, NarTop10Accuracy=0.6681, over 6543.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7205, over 5867.07 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:57:08,991 INFO [trainer.py:765] (3/8) Epoch 38, batch 1100, train_loss[loss=3.171, NarTop10Accuracy=0.6833, over 7041.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7176, over 5908.26 frames. ], batch size: 18, lr: 2.24e-03 +2024-08-06 22:57:42,662 INFO [trainer.py:765] (3/8) Epoch 38, batch 1200, train_loss[loss=2.858, NarTop10Accuracy=0.755, over 7068.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7187, over 5916.31 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,546 INFO [trainer.py:765] (3/8) Epoch 38, batch 1300, train_loss[loss=3.259, NarTop10Accuracy=0.6743, over 5160.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7199, over 5987.15 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,811 INFO [trainer.py:765] (3/8) Epoch 38, batch 1400, train_loss[loss=2.866, NarTop10Accuracy=0.7562, over 6102.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 6027.43 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,854 INFO [trainer.py:765] (3/8) Epoch 38, batch 1500, train_loss[loss=3.557, NarTop10Accuracy=0.6068, over 6729.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7195, over 5976.90 frames. ], batch size: 53, lr: 2.23e-03 +2024-08-06 22:59:50,644 INFO [trainer.py:765] (3/8) Epoch 38, batch 1600, train_loss[loss=3.385, NarTop10Accuracy=0.6462, over 7134.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7187, over 5957.81 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,316 INFO [trainer.py:765] (3/8) Epoch 38, batch 1700, train_loss[loss=2.906, NarTop10Accuracy=0.737, over 6192.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7154, over 5940.31 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 23:00:43,764 INFO [trainer.py:765] (3/8) Epoch 38, batch 1800, train_loss[loss=3.364, NarTop10Accuracy=0.6519, over 7086.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7164, over 6001.82 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,193 INFO [trainer.py:765] (3/8) Epoch 38, batch 1900, train_loss[loss=3.458, NarTop10Accuracy=0.6393, over 5940.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 6041.34 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (3/8) Epoch 38, batch 2000, train_loss[loss=3.247, NarTop10Accuracy=0.6776, over 6411.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7152, over 5996.83 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 23:02:01,051 INFO [trainer.py:765] (3/8) Epoch 38, batch 2100, train_loss[loss=2.841, NarTop10Accuracy=0.7541, over 4797.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5990.93 frames. 
], batch size: 5, lr: 2.23e-03 +2024-08-06 23:02:26,314 INFO [trainer.py:765] (3/8) Epoch 38, batch 2200, train_loss[loss=2.842, NarTop10Accuracy=0.7589, over 7305.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 6010.44 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,420 INFO [trainer.py:765] (3/8) Epoch 38, batch 2300, train_loss[loss=2.63, NarTop10Accuracy=0.8007, over 5769.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7182, over 6006.23 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,348 INFO [trainer.py:765] (3/8) Epoch 38, batch 2400, train_loss[loss=2.909, NarTop10Accuracy=0.739, over 5244.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7196, over 5787.56 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,824 INFO [trainer.py:765] (3/8) Epoch 38, batch 2500, train_loss[loss=3.174, NarTop10Accuracy=0.6776, over 5217.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7238, over 5467.54 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,877 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 23:04:58,941 INFO [trainer.py:765] (3/8) Epoch 39, batch 100, train_loss[loss=3.286, NarTop10Accuracy=0.6665, over 6975.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7273, over 2368.65 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,469 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (3/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29618MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (3/8) Epoch 39, batch 200, train_loss[loss=2.792, NarTop10Accuracy=0.7795, over 6720.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7249, over 3862.18 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (3/8) Epoch 39, batch 300, train_loss[loss=3.024, NarTop10Accuracy=0.721, over 6933.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7262, over 4661.02 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,276 INFO [trainer.py:765] (3/8) Epoch 39, batch 400, train_loss[loss=2.917, NarTop10Accuracy=0.7489, over 5355.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7274, over 5110.41 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (3/8) Epoch 39, batch 500, train_loss[loss=3.359, NarTop10Accuracy=0.6588, over 6099.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7249, over 5384.82 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (3/8) Epoch 39, batch 600, train_loss[loss=2.652, NarTop10Accuracy=0.8027, over 5709.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7216, over 5659.00 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,695 INFO [trainer.py:765] (3/8) Epoch 39, batch 700, train_loss[loss=3.257, NarTop10Accuracy=0.6738, over 4992.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7199, over 5711.41 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (3/8) Epoch 39, batch 800, train_loss[loss=2.687, NarTop10Accuracy=0.7942, over 4956.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7197, over 5776.20 frames. 
], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (3/8) Epoch 39, batch 900, train_loss[loss=3.361, NarTop10Accuracy=0.6438, over 6639.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7212, over 5782.52 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (3/8) Epoch 39, batch 1000, train_loss[loss=2.817, NarTop10Accuracy=0.7595, over 6609.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5916.42 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (3/8) Epoch 39, batch 1100, train_loss[loss=2.753, NarTop10Accuracy=0.7737, over 6786.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 5926.39 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (3/8) Epoch 39, batch 1200, train_loss[loss=2.919, NarTop10Accuracy=0.7437, over 7344.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7215, over 5926.91 frames. ], batch size: 32, lr: 2.18e-03 +2024-08-06 23:12:07,253 INFO [trainer.py:765] (3/8) Epoch 39, batch 1300, train_loss[loss=2.747, NarTop10Accuracy=0.7682, over 5133.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.723, over 6001.36 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,302 INFO [trainer.py:765] (3/8) Epoch 39, batch 1400, train_loss[loss=2.965, NarTop10Accuracy=0.7404, over 6186.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 6014.42 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (3/8) Epoch 39, batch 1500, train_loss[loss=3.553, NarTop10Accuracy=0.6183, over 6045.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7208, over 5947.62 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (3/8) Epoch 39, batch 1600, train_loss[loss=2.97, NarTop10Accuracy=0.7342, over 7095.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.7241, over 5932.73 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (3/8) Epoch 39, batch 1700, train_loss[loss=3.419, NarTop10Accuracy=0.6449, over 6696.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 5914.39 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 23:14:30,768 INFO [trainer.py:765] (3/8) Epoch 39, batch 1800, train_loss[loss=2.842, NarTop10Accuracy=0.7641, over 6885.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7173, over 5977.41 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (3/8) Epoch 39, batch 1900, train_loss[loss=2.981, NarTop10Accuracy=0.7291, over 6036.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7152, over 6026.64 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,751 INFO [trainer.py:765] (3/8) Epoch 39, batch 2000, train_loss[loss=3.258, NarTop10Accuracy=0.6723, over 6111.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5993.49 frames. ], batch size: 51, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (3/8) Epoch 39, batch 2100, train_loss[loss=3.257, NarTop10Accuracy=0.6732, over 4809.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7195, over 5965.55 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (3/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
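Note: the per-batch lines above follow one fixed pattern (train_loss / tot_loss with NarTop10Accuracy, batch size, lr). Below is a minimal, illustrative Python sketch for pulling those numbers out of one of these log files; the regular expression and the example path are assumptions based only on the format shown here, not part of the training recipe.

import re
from pathlib import Path

# Matches entries like:
#   ... Epoch 39, batch 2100, train_loss[loss=3.257, NarTop10Accuracy=0.6732, over 4809.00 frames. ],
#   tot_loss[loss=3.029, NarTop10Accuracy=0.7195, over 5965.55 frames. ], batch size: 5, lr: 2.17e-03
BATCH_RE = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), NarTop10Accuracy=(?P<train_acc>[\d.]+), over [\d.]+ frames\. \], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), NarTop10Accuracy=(?P<tot_acc>[\d.]+), over [\d.]+ frames\. \], "
    r"batch size: (?P<bs>\d+), lr: (?P<lr>[\d.eE+-]+)"
)

def parse_batch_lines(path):
    """Yield one dict per 'Epoch N, batch M, ...' entry; validation and optimizer lines are skipped."""
    for line in Path(path).read_text().splitlines():
        m = BATCH_RE.search(line)
        if m is None:
            continue
        rec = m.groupdict()
        ints = {"epoch", "batch", "bs"}
        yield {k: (int(v) if k in ints else float(v)) for k, v in rec.items()}

if __name__ == "__main__":
    # Hypothetical invocation; point it at any of the log files added in this diff.
    records = list(parse_batch_lines("libritts-r/log/log-train-2024-08-06-14-23-41-4"))
    last = records[-1]
    print(f"epoch {last['epoch']}, batch {last['batch']}: "
          f"tot_loss={last['tot_loss']:.3f}, NarTop10Accuracy={last['tot_acc']:.4f}, lr={last['lr']:.2e}")

Running it over a full log makes the trends in these entries (slowly falling tot_loss, lr decaying per epoch under the Eden schedule) easy to plot or tabulate.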
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30367MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (3/8) Epoch 39, batch 2200, train_loss[loss=3.228, NarTop10Accuracy=0.6752, over 7377.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7197, over 6011.15 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (3/8) Epoch 39, batch 2300, train_loss[loss=2.872, NarTop10Accuracy=0.7571, over 5742.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 6027.38 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (3/8) Epoch 39, batch 2400, train_loss[loss=2.656, NarTop10Accuracy=0.7924, over 5061.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7218, over 5779.35 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,711 INFO [trainer.py:765] (3/8) Epoch 39, batch 2500, train_loss[loss=2.861, NarTop10Accuracy=0.7513, over 5037.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7259, over 5472.13 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,449 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (3/8) Epoch 40, batch 100, train_loss[loss=2.999, NarTop10Accuracy=0.7225, over 7410.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.723, over 2385.22 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (3/8) Epoch 40, batch 200, train_loss[loss=2.857, NarTop10Accuracy=0.7473, over 6684.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7268, over 3868.60 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (3/8) Epoch 40, batch 300, train_loss[loss=2.834, NarTop10Accuracy=0.7551, over 7191.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7234, over 4664.94 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (3/8) Epoch 40, batch 400, train_loss[loss=2.91, NarTop10Accuracy=0.7472, over 4965.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7235, over 5113.54 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (3/8) Epoch 40, batch 500, train_loss[loss=2.841, NarTop10Accuracy=0.7587, over 6153.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7235, over 5395.15 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,881 INFO [trainer.py:765] (3/8) Epoch 40, batch 600, train_loss[loss=2.773, NarTop10Accuracy=0.7672, over 5586.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.725, over 5654.68 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (3/8) Epoch 40, batch 700, train_loss[loss=3.111, NarTop10Accuracy=0.7002, over 5163.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7234, over 5729.47 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,753 INFO [trainer.py:765] (3/8) Epoch 40, batch 800, train_loss[loss=2.797, NarTop10Accuracy=0.7628, over 5115.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7211, over 5776.69 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (3/8) Epoch 40, batch 900, train_loss[loss=3.358, NarTop10Accuracy=0.6557, over 6393.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7218, over 5786.99 frames. 
], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (3/8) Epoch 40, batch 1000, train_loss[loss=3.388, NarTop10Accuracy=0.6442, over 6273.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7208, over 5897.37 frames. ], batch size: 13, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (3/8) Epoch 40, batch 1100, train_loss[loss=2.763, NarTop10Accuracy=0.7694, over 7011.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7206, over 5930.92 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (3/8) Epoch 40, batch 1200, train_loss[loss=2.909, NarTop10Accuracy=0.7395, over 7071.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7204, over 5938.08 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (3/8) Epoch 40, batch 1300, train_loss[loss=2.732, NarTop10Accuracy=0.7726, over 5073.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7228, over 5989.56 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,384 INFO [trainer.py:765] (3/8) Epoch 40, batch 1400, train_loss[loss=2.787, NarTop10Accuracy=0.775, over 6159.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7203, over 6007.31 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (3/8) Epoch 40, batch 1500, train_loss[loss=3.196, NarTop10Accuracy=0.6817, over 5871.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7227, over 5936.05 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (3/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30367MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (3/8) Epoch 40, batch 1600, train_loss[loss=2.95, NarTop10Accuracy=0.7356, over 6984.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7211, over 5926.73 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (3/8) Epoch 40, batch 1700, train_loss[loss=3.396, NarTop10Accuracy=0.6433, over 6285.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7221, over 5923.04 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 23:28:12,578 INFO [trainer.py:765] (3/8) Epoch 40, batch 1800, train_loss[loss=2.986, NarTop10Accuracy=0.7188, over 7038.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7259, over 5993.16 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (3/8) Epoch 40, batch 1900, train_loss[loss=3.114, NarTop10Accuracy=0.7006, over 5964.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7247, over 6028.79 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,444 INFO [trainer.py:765] (3/8) Epoch 40, batch 2000, train_loss[loss=3.529, NarTop10Accuracy=0.6164, over 5595.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7232, over 6000.07 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (3/8) Epoch 40, batch 2100, train_loss[loss=2.75, NarTop10Accuracy=0.7702, over 4011.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7232, over 5980.47 frames. 
], batch size: 4, lr: 2.11e-03 +2024-08-06 23:29:54,939 INFO [trainer.py:765] (3/8) Epoch 40, batch 2200, train_loss[loss=3.225, NarTop10Accuracy=0.6751, over 7080.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7202, over 6012.18 frames. ], batch size: 31, lr: 2.11e-03 +2024-08-06 23:30:20,012 INFO [trainer.py:765] (3/8) Epoch 40, batch 2300, train_loss[loss=2.856, NarTop10Accuracy=0.7473, over 5622.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7189, over 6024.47 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,295 INFO [trainer.py:765] (3/8) Epoch 40, batch 2400, train_loss[loss=2.692, NarTop10Accuracy=0.7959, over 5097.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7205, over 5757.58 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (3/8) Epoch 40, batch 2500, train_loss[loss=3.073, NarTop10Accuracy=0.7086, over 5322.00 frames. ], tot_loss[loss=2.984, NarTop10Accuracy=0.7283, over 5476.77 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,485 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 23:31:27,488 INFO [trainer.py:1069] (3/8) Done! diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-4 b/libritts-r/log/log-train-2024-08-06-14-23-41-4 new file mode 100644 index 0000000000000000000000000000000000000000..449125be24d4d901431239d2f0ca254988438073 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-4 @@ -0,0 +1,1260 @@ +2024-08-06 14:23:41,788 INFO [trainer.py:870] (4/8) Training started +2024-08-06 14:23:41,789 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 14:23:41,789 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,789 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 14:23:42,584 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,585 INFO [checkpoint.py:112] (4/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,587 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 14:23:49,643 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 14:23:49,645 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 14:23:49,646 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 14:23:49,646 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 14:23:49,647 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,269 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 14:23:50,270 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 14:23:50,602 INFO [datamodule.py:388] (4/8) About to create dev dataloader +2024-08-06 14:24:38,249 INFO [trainer.py:765] (4/8) Epoch 1, batch 100, train_loss[loss=106.9, NarTop10Accuracy=0.02083, over 7263.00 frames. ], tot_loss[loss=74.22, NarTop10Accuracy=0.04496, over 2352.16 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (4/8) Epoch 1, batch 200, train_loss[loss=133.9, NarTop10Accuracy=0.01548, over 6891.00 frames. ], tot_loss[loss=97.68, NarTop10Accuracy=0.04142, over 3862.76 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,111 INFO [trainer.py:765] (4/8) Epoch 1, batch 300, train_loss[loss=99.71, NarTop10Accuracy=0.02296, over 7125.00 frames. ], tot_loss[loss=85.35, NarTop10Accuracy=0.04282, over 4657.50 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,482 INFO [trainer.py:765] (4/8) Epoch 1, batch 400, train_loss[loss=53.17, NarTop10Accuracy=0.02305, over 5133.00 frames. ], tot_loss[loss=67.96, NarTop10Accuracy=0.04728, over 5101.12 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,357 INFO [trainer.py:765] (4/8) Epoch 1, batch 500, train_loss[loss=14.77, NarTop10Accuracy=0.02519, over 6039.00 frames. ], tot_loss[loss=49.13, NarTop10Accuracy=0.05014, over 5373.17 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,000 INFO [trainer.py:765] (4/8) Epoch 1, batch 600, train_loss[loss=6.172, NarTop10Accuracy=0.1872, over 5658.00 frames. ], tot_loss[loss=33.43, NarTop10Accuracy=0.05587, over 5653.66 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (4/8) Epoch 1, batch 700, train_loss[loss=6.809, NarTop10Accuracy=0.1082, over 5073.00 frames. ], tot_loss[loss=23.38, NarTop10Accuracy=0.06459, over 5734.64 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 14:28:08,832 INFO [trainer.py:765] (4/8) Epoch 1, batch 800, train_loss[loss=6.48, NarTop10Accuracy=0.1294, over 4425.00 frames. ], tot_loss[loss=17.14, NarTop10Accuracy=0.08496, over 5790.91 frames. ], batch size: 5, lr: 2.98e-02 +2024-08-06 14:28:36,758 INFO [trainer.py:765] (4/8) Epoch 1, batch 900, train_loss[loss=5.851, NarTop10Accuracy=0.1589, over 6615.00 frames. ], tot_loss[loss=12.79, NarTop10Accuracy=0.113, over 5800.33 frames. 
], batch size: 14, lr: 2.98e-02 +2024-08-06 14:29:12,586 INFO [trainer.py:765] (4/8) Epoch 1, batch 1000, train_loss[loss=5.578, NarTop10Accuracy=0.2194, over 6165.00 frames. ], tot_loss[loss=10.11, NarTop10Accuracy=0.1343, over 5888.51 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 14:29:42,825 INFO [trainer.py:765] (4/8) Epoch 1, batch 1100, train_loss[loss=5.681, NarTop10Accuracy=0.196, over 6762.00 frames. ], tot_loss[loss=8.423, NarTop10Accuracy=0.1542, over 5904.54 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,468 INFO [trainer.py:765] (4/8) Epoch 1, batch 1200, train_loss[loss=5.958, NarTop10Accuracy=0.1501, over 7260.00 frames. ], tot_loss[loss=7.351, NarTop10Accuracy=0.1711, over 5918.74 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,747 INFO [trainer.py:765] (4/8) Epoch 1, batch 1300, train_loss[loss=5.341, NarTop10Accuracy=0.2774, over 5139.00 frames. ], tot_loss[loss=6.679, NarTop10Accuracy=0.1872, over 5989.25 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 14:31:18,143 INFO [trainer.py:765] (4/8) Epoch 1, batch 1400, train_loss[loss=5.591, NarTop10Accuracy=0.2127, over 6189.00 frames. ], tot_loss[loss=6.251, NarTop10Accuracy=0.198, over 6004.68 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,026 INFO [trainer.py:765] (4/8) Epoch 1, batch 1500, train_loss[loss=5.788, NarTop10Accuracy=0.1813, over 6327.00 frames. ], tot_loss[loss=5.965, NarTop10Accuracy=0.2105, over 5950.52 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 14:32:13,691 INFO [trainer.py:765] (4/8) Epoch 1, batch 1600, train_loss[loss=5.667, NarTop10Accuracy=0.1998, over 7182.00 frames. ], tot_loss[loss=5.791, NarTop10Accuracy=0.2177, over 5924.47 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,198 INFO [trainer.py:765] (4/8) Epoch 1, batch 1700, train_loss[loss=5.231, NarTop10Accuracy=0.2883, over 6195.00 frames. ], tot_loss[loss=5.667, NarTop10Accuracy=0.2255, over 5920.15 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 14:33:06,499 INFO [trainer.py:765] (4/8) Epoch 1, batch 1800, train_loss[loss=5.481, NarTop10Accuracy=0.2219, over 7254.00 frames. ], tot_loss[loss=5.575, NarTop10Accuracy=0.2331, over 5977.88 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (4/8) Epoch 1, batch 1900, train_loss[loss=5.716, NarTop10Accuracy=0.1903, over 6150.00 frames. ], tot_loss[loss=5.51, NarTop10Accuracy=0.2405, over 6021.45 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 14:33:58,014 INFO [trainer.py:765] (4/8) Epoch 1, batch 2000, train_loss[loss=5.56, NarTop10Accuracy=0.2315, over 6456.00 frames. ], tot_loss[loss=5.451, NarTop10Accuracy=0.249, over 6014.64 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (4/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 26462MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,061 INFO [trainer.py:765] (4/8) Epoch 1, batch 2100, train_loss[loss=5.187, NarTop10Accuracy=0.2958, over 4017.00 frames. ], tot_loss[loss=5.385, NarTop10Accuracy=0.2598, over 5979.06 frames. 
], batch size: 4, lr: 2.88e-02 +2024-08-06 14:34:57,303 INFO [trainer.py:765] (4/8) Epoch 1, batch 2200, train_loss[loss=5.44, NarTop10Accuracy=0.2485, over 7404.00 frames. ], tot_loss[loss=5.346, NarTop10Accuracy=0.2657, over 6006.35 frames. ], batch size: 32, lr: 2.87e-02 +2024-08-06 14:35:22,455 INFO [trainer.py:765] (4/8) Epoch 1, batch 2300, train_loss[loss=5.326, NarTop10Accuracy=0.2631, over 5673.00 frames. ], tot_loss[loss=5.339, NarTop10Accuracy=0.2665, over 6024.02 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,815 INFO [trainer.py:765] (4/8) Epoch 1, batch 2400, train_loss[loss=5.406, NarTop10Accuracy=0.2526, over 5178.00 frames. ], tot_loss[loss=5.284, NarTop10Accuracy=0.2769, over 5766.88 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (4/8) Epoch 1, batch 2500, train_loss[loss=5.052, NarTop10Accuracy=0.3257, over 5352.00 frames. ], tot_loss[loss=5.217, NarTop10Accuracy=0.2886, over 5482.39 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,007 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 14:37:29,669 INFO [trainer.py:765] (4/8) Epoch 2, batch 100, train_loss[loss=4.983, NarTop10Accuracy=0.3321, over 7494.00 frames. ], tot_loss[loss=5.187, NarTop10Accuracy=0.2957, over 2365.70 frames. ], batch size: 32, lr: 2.77e-02 +2024-08-06 14:38:10,014 INFO [trainer.py:765] (4/8) Epoch 2, batch 200, train_loss[loss=5.135, NarTop10Accuracy=0.3065, over 6768.00 frames. ], tot_loss[loss=5.159, NarTop10Accuracy=0.2999, over 3860.07 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (4/8) Epoch 2, batch 300, train_loss[loss=5.184, NarTop10Accuracy=0.2962, over 7026.00 frames. ], tot_loss[loss=5.137, NarTop10Accuracy=0.3036, over 4663.32 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:06,998 INFO [trainer.py:765] (4/8) Epoch 2, batch 400, train_loss[loss=5.036, NarTop10Accuracy=0.3214, over 5193.00 frames. ], tot_loss[loss=5.115, NarTop10Accuracy=0.3071, over 5110.12 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,118 INFO [trainer.py:765] (4/8) Epoch 2, batch 500, train_loss[loss=4.872, NarTop10Accuracy=0.3493, over 6105.00 frames. ], tot_loss[loss=5.076, NarTop10Accuracy=0.315, over 5391.47 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,082 INFO [trainer.py:765] (4/8) Epoch 2, batch 600, train_loss[loss=4.753, NarTop10Accuracy=0.3896, over 5706.00 frames. ], tot_loss[loss=5.052, NarTop10Accuracy=0.3197, over 5655.03 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (4/8) Epoch 2, batch 700, train_loss[loss=5.025, NarTop10Accuracy=0.3314, over 5022.00 frames. ], tot_loss[loss=5.036, NarTop10Accuracy=0.3225, over 5732.88 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,512 INFO [trainer.py:765] (4/8) Epoch 2, batch 800, train_loss[loss=4.941, NarTop10Accuracy=0.3415, over 4290.00 frames. ], tot_loss[loss=5.021, NarTop10Accuracy=0.3249, over 5771.04 frames. ], batch size: 5, lr: 2.69e-02 +2024-08-06 14:41:54,404 INFO [trainer.py:765] (4/8) Epoch 2, batch 900, train_loss[loss=4.726, NarTop10Accuracy=0.3778, over 6720.00 frames. ], tot_loss[loss=4.985, NarTop10Accuracy=0.3316, over 5794.38 frames. ], batch size: 14, lr: 2.68e-02 +2024-08-06 14:42:23,901 INFO [trainer.py:765] (4/8) Epoch 2, batch 1000, train_loss[loss=4.831, NarTop10Accuracy=0.3599, over 6669.00 frames. ], tot_loss[loss=4.952, NarTop10Accuracy=0.3377, over 5893.94 frames. 
], batch size: 14, lr: 2.66e-02 +2024-08-06 14:42:56,254 INFO [trainer.py:765] (4/8) Epoch 2, batch 1100, train_loss[loss=4.867, NarTop10Accuracy=0.3562, over 6915.00 frames. ], tot_loss[loss=4.934, NarTop10Accuracy=0.3413, over 5922.75 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,186 INFO [trainer.py:765] (4/8) Epoch 2, batch 1200, train_loss[loss=4.714, NarTop10Accuracy=0.387, over 7470.00 frames. ], tot_loss[loss=4.909, NarTop10Accuracy=0.3461, over 5906.24 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,345 INFO [trainer.py:765] (4/8) Epoch 2, batch 1300, train_loss[loss=4.845, NarTop10Accuracy=0.3592, over 4905.00 frames. ], tot_loss[loss=4.869, NarTop10Accuracy=0.3533, over 5992.07 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,728 INFO [trainer.py:765] (4/8) Epoch 2, batch 1400, train_loss[loss=5.029, NarTop10Accuracy=0.3247, over 6120.00 frames. ], tot_loss[loss=4.855, NarTop10Accuracy=0.3564, over 6016.47 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,441 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (4/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (4/8) Epoch 2, batch 1500, train_loss[loss=4.779, NarTop10Accuracy=0.372, over 6327.00 frames. ], tot_loss[loss=4.831, NarTop10Accuracy=0.3606, over 5943.92 frames. ], batch size: 51, lr: 2.60e-02 +2024-08-06 14:45:37,660 INFO [trainer.py:765] (4/8) Epoch 2, batch 1600, train_loss[loss=4.765, NarTop10Accuracy=0.3742, over 7113.00 frames. ], tot_loss[loss=4.8, NarTop10Accuracy=0.3666, over 5921.48 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (4/8) Epoch 2, batch 1700, train_loss[loss=4.756, NarTop10Accuracy=0.372, over 6198.00 frames. ], tot_loss[loss=4.789, NarTop10Accuracy=0.3691, over 5899.56 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 14:46:31,033 INFO [trainer.py:765] (4/8) Epoch 2, batch 1800, train_loss[loss=4.78, NarTop10Accuracy=0.3722, over 6996.00 frames. ], tot_loss[loss=4.773, NarTop10Accuracy=0.372, over 5988.57 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,531 INFO [trainer.py:765] (4/8) Epoch 2, batch 1900, train_loss[loss=4.739, NarTop10Accuracy=0.3737, over 6135.00 frames. ], tot_loss[loss=4.752, NarTop10Accuracy=0.3759, over 6027.10 frames. ], batch size: 53, lr: 2.55e-02 +2024-08-06 14:47:23,233 INFO [trainer.py:765] (4/8) Epoch 2, batch 2000, train_loss[loss=4.75, NarTop10Accuracy=0.3835, over 5940.00 frames. ], tot_loss[loss=4.724, NarTop10Accuracy=0.3812, over 6007.84 frames. ], batch size: 50, lr: 2.54e-02 +2024-08-06 14:47:48,588 INFO [trainer.py:765] (4/8) Epoch 2, batch 2100, train_loss[loss=4.823, NarTop10Accuracy=0.3596, over 4026.00 frames. ], tot_loss[loss=4.712, NarTop10Accuracy=0.3832, over 5972.64 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,764 INFO [trainer.py:765] (4/8) Epoch 2, batch 2200, train_loss[loss=4.647, NarTop10Accuracy=0.3897, over 7305.00 frames. ], tot_loss[loss=4.672, NarTop10Accuracy=0.3908, over 6015.55 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (4/8) Epoch 2, batch 2300, train_loss[loss=4.859, NarTop10Accuracy=0.3563, over 5718.00 frames. ], tot_loss[loss=4.678, NarTop10Accuracy=0.3894, over 6013.04 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,319 INFO [trainer.py:765] (4/8) Epoch 2, batch 2400, train_loss[loss=4.334, NarTop10Accuracy=0.4646, over 5088.00 frames. ], tot_loss[loss=4.647, NarTop10Accuracy=0.3954, over 5779.63 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (4/8) Epoch 2, batch 2500, train_loss[loss=4.633, NarTop10Accuracy=0.3957, over 5151.00 frames. ], tot_loss[loss=4.619, NarTop10Accuracy=0.4008, over 5484.17 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,802 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 14:50:51,117 INFO [trainer.py:765] (4/8) Epoch 3, batch 100, train_loss[loss=4.654, NarTop10Accuracy=0.385, over 7356.00 frames. ], tot_loss[loss=4.586, NarTop10Accuracy=0.4076, over 2369.28 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,387 INFO [trainer.py:765] (4/8) Epoch 3, batch 200, train_loss[loss=4.603, NarTop10Accuracy=0.3984, over 6894.00 frames. ], tot_loss[loss=4.549, NarTop10Accuracy=0.4148, over 3862.32 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,954 INFO [trainer.py:765] (4/8) Epoch 3, batch 300, train_loss[loss=4.699, NarTop10Accuracy=0.3826, over 7017.00 frames. ], tot_loss[loss=4.526, NarTop10Accuracy=0.4192, over 4665.53 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (4/8) Epoch 3, batch 400, train_loss[loss=4.545, NarTop10Accuracy=0.4116, over 5163.00 frames. ], tot_loss[loss=4.497, NarTop10Accuracy=0.4252, over 5098.38 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (4/8) Epoch 3, batch 500, train_loss[loss=4.423, NarTop10Accuracy=0.4433, over 6069.00 frames. ], tot_loss[loss=4.491, NarTop10Accuracy=0.4262, over 5373.12 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,551 INFO [trainer.py:765] (4/8) Epoch 3, batch 600, train_loss[loss=4.282, NarTop10Accuracy=0.4664, over 5613.00 frames. ], tot_loss[loss=4.478, NarTop10Accuracy=0.429, over 5644.85 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,466 INFO [trainer.py:765] (4/8) Epoch 3, batch 700, train_loss[loss=4.441, NarTop10Accuracy=0.4389, over 5016.00 frames. ], tot_loss[loss=4.45, NarTop10Accuracy=0.4347, over 5721.70 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (4/8) Epoch 3, batch 800, train_loss[loss=4.153, NarTop10Accuracy=0.4893, over 5031.00 frames. ], tot_loss[loss=4.427, NarTop10Accuracy=0.4391, over 5774.85 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (4/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,052 INFO [trainer.py:765] (4/8) Epoch 3, batch 900, train_loss[loss=4.153, NarTop10Accuracy=0.4938, over 6390.00 frames. ], tot_loss[loss=4.391, NarTop10Accuracy=0.4461, over 5785.87 frames. 
], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,958 INFO [trainer.py:765] (4/8) Epoch 3, batch 1000, train_loss[loss=4.124, NarTop10Accuracy=0.5011, over 6243.00 frames. ], tot_loss[loss=4.366, NarTop10Accuracy=0.4511, over 5879.98 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 14:56:37,301 INFO [trainer.py:765] (4/8) Epoch 3, batch 1100, train_loss[loss=4.422, NarTop10Accuracy=0.4279, over 6579.00 frames. ], tot_loss[loss=4.348, NarTop10Accuracy=0.4544, over 5918.50 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (4/8) Epoch 3, batch 1200, train_loss[loss=4.308, NarTop10Accuracy=0.4475, over 7182.00 frames. ], tot_loss[loss=4.331, NarTop10Accuracy=0.4576, over 5919.47 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,631 INFO [trainer.py:765] (4/8) Epoch 3, batch 1300, train_loss[loss=4.112, NarTop10Accuracy=0.5039, over 5100.00 frames. ], tot_loss[loss=4.304, NarTop10Accuracy=0.4631, over 5994.55 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 14:58:22,900 INFO [trainer.py:765] (4/8) Epoch 3, batch 1400, train_loss[loss=4.1, NarTop10Accuracy=0.5005, over 6117.00 frames. ], tot_loss[loss=4.294, NarTop10Accuracy=0.4647, over 6021.33 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,855 INFO [trainer.py:765] (4/8) Epoch 3, batch 1500, train_loss[loss=4.339, NarTop10Accuracy=0.4654, over 5736.00 frames. ], tot_loss[loss=4.27, NarTop10Accuracy=0.4694, over 5948.51 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 14:59:18,715 INFO [trainer.py:765] (4/8) Epoch 3, batch 1600, train_loss[loss=3.932, NarTop10Accuracy=0.5352, over 7194.00 frames. ], tot_loss[loss=4.253, NarTop10Accuracy=0.4725, over 5942.22 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,953 INFO [trainer.py:765] (4/8) Epoch 3, batch 1700, train_loss[loss=4.044, NarTop10Accuracy=0.5174, over 6528.00 frames. ], tot_loss[loss=4.232, NarTop10Accuracy=0.4766, over 5922.57 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (4/8) Epoch 3, batch 1800, train_loss[loss=4.041, NarTop10Accuracy=0.5179, over 7068.00 frames. ], tot_loss[loss=4.215, NarTop10Accuracy=0.4801, over 5986.63 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,949 INFO [trainer.py:765] (4/8) Epoch 3, batch 1900, train_loss[loss=4.624, NarTop10Accuracy=0.3976, over 5904.00 frames. ], tot_loss[loss=4.197, NarTop10Accuracy=0.4843, over 6024.64 frames. ], batch size: 52, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (4/8) Epoch 3, batch 2000, train_loss[loss=4.521, NarTop10Accuracy=0.4144, over 5823.00 frames. ], tot_loss[loss=4.17, NarTop10Accuracy=0.4895, over 5988.92 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,899 INFO [trainer.py:765] (4/8) Epoch 3, batch 2100, train_loss[loss=3.928, NarTop10Accuracy=0.5386, over 4803.00 frames. ], tot_loss[loss=4.148, NarTop10Accuracy=0.4935, over 5967.62 frames. ], batch size: 5, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (4/8) Epoch 3, batch 2200, train_loss[loss=3.961, NarTop10Accuracy=0.5316, over 7011.00 frames. ], tot_loss[loss=4.122, NarTop10Accuracy=0.4991, over 6012.29 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (4/8) Epoch 3, batch 2300, train_loss[loss=4.458, NarTop10Accuracy=0.4342, over 5721.00 frames. ], tot_loss[loss=4.131, NarTop10Accuracy=0.4975, over 6027.00 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,664 INFO [trainer.py:765] (4/8) Epoch 3, batch 2400, train_loss[loss=4.152, NarTop10Accuracy=0.4884, over 5229.00 frames. ], tot_loss[loss=4.103, NarTop10Accuracy=0.5032, over 5756.56 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (4/8) Epoch 3, batch 2500, train_loss[loss=3.853, NarTop10Accuracy=0.5569, over 5118.00 frames. ], tot_loss[loss=4.048, NarTop10Accuracy=0.5147, over 5479.20 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,349 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:04:28,130 INFO [trainer.py:765] (4/8) Epoch 4, batch 100, train_loss[loss=3.878, NarTop10Accuracy=0.5446, over 7209.00 frames. ], tot_loss[loss=4.035, NarTop10Accuracy=0.517, over 2366.50 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,841 INFO [trainer.py:765] (4/8) Epoch 4, batch 200, train_loss[loss=3.821, NarTop10Accuracy=0.5554, over 6888.00 frames. ], tot_loss[loss=4.005, NarTop10Accuracy=0.5231, over 3859.55 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,508 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 15:05:36,238 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,889 INFO [trainer.py:765] (4/8) Epoch 4, batch 300, train_loss[loss=3.868, NarTop10Accuracy=0.5582, over 7239.00 frames. ], tot_loss[loss=3.992, NarTop10Accuracy=0.5258, over 4657.70 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,124 INFO [trainer.py:765] (4/8) Epoch 4, batch 400, train_loss[loss=3.819, NarTop10Accuracy=0.5639, over 5349.00 frames. ], tot_loss[loss=4.005, NarTop10Accuracy=0.5234, over 5115.90 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (4/8) Epoch 4, batch 500, train_loss[loss=4.156, NarTop10Accuracy=0.4833, over 5994.00 frames. ], tot_loss[loss=3.987, NarTop10Accuracy=0.5272, over 5401.97 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,818 INFO [trainer.py:765] (4/8) Epoch 4, batch 600, train_loss[loss=3.637, NarTop10Accuracy=0.5894, over 5694.00 frames. ], tot_loss[loss=3.974, NarTop10Accuracy=0.5295, over 5653.17 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (4/8) Epoch 4, batch 700, train_loss[loss=4.274, NarTop10Accuracy=0.4614, over 5136.00 frames. ], tot_loss[loss=3.974, NarTop10Accuracy=0.5296, over 5719.17 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,430 INFO [trainer.py:765] (4/8) Epoch 4, batch 800, train_loss[loss=3.36, NarTop10Accuracy=0.6409, over 5175.00 frames. ], tot_loss[loss=3.96, NarTop10Accuracy=0.5323, over 5791.46 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,689 INFO [trainer.py:765] (4/8) Epoch 4, batch 900, train_loss[loss=3.627, NarTop10Accuracy=0.5981, over 6111.00 frames. ], tot_loss[loss=3.927, NarTop10Accuracy=0.5393, over 5786.61 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,075 INFO [trainer.py:765] (4/8) Epoch 4, batch 1000, train_loss[loss=3.636, NarTop10Accuracy=0.592, over 6237.00 frames. ], tot_loss[loss=3.921, NarTop10Accuracy=0.5404, over 5895.13 frames. 
], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,139 INFO [trainer.py:765] (4/8) Epoch 4, batch 1100, train_loss[loss=3.882, NarTop10Accuracy=0.5455, over 6867.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5435, over 5934.97 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (4/8) Epoch 4, batch 1200, train_loss[loss=4.31, NarTop10Accuracy=0.4531, over 7113.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.5438, over 5932.64 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (4/8) Epoch 4, batch 1300, train_loss[loss=3.512, NarTop10Accuracy=0.6291, over 5055.00 frames. ], tot_loss[loss=3.865, NarTop10Accuracy=0.5517, over 5992.71 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (4/8) Epoch 4, batch 1400, train_loss[loss=3.67, NarTop10Accuracy=0.5999, over 6057.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5538, over 5999.72 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (4/8) Epoch 4, batch 1500, train_loss[loss=3.845, NarTop10Accuracy=0.5672, over 5805.00 frames. ], tot_loss[loss=3.86, NarTop10Accuracy=0.5529, over 5943.15 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (4/8) Epoch 4, batch 1600, train_loss[loss=3.812, NarTop10Accuracy=0.5678, over 7086.00 frames. ], tot_loss[loss=3.852, NarTop10Accuracy=0.5546, over 5937.46 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,132 INFO [trainer.py:765] (4/8) Epoch 4, batch 1700, train_loss[loss=3.656, NarTop10Accuracy=0.595, over 6597.00 frames. ], tot_loss[loss=3.828, NarTop10Accuracy=0.5593, over 5915.58 frames. ], batch size: 14, lr: 1.84e-02 +2024-08-06 15:13:54,558 INFO [trainer.py:765] (4/8) Epoch 4, batch 1800, train_loss[loss=3.932, NarTop10Accuracy=0.5465, over 7134.00 frames. ], tot_loss[loss=3.827, NarTop10Accuracy=0.5594, over 5974.57 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (4/8) Epoch 4, batch 1900, train_loss[loss=3.787, NarTop10Accuracy=0.5686, over 5676.00 frames. ], tot_loss[loss=3.848, NarTop10Accuracy=0.5552, over 6021.45 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 15:14:46,671 INFO [trainer.py:765] (4/8) Epoch 4, batch 2000, train_loss[loss=3.72, NarTop10Accuracy=0.5847, over 5940.00 frames. ], tot_loss[loss=3.821, NarTop10Accuracy=0.5608, over 6000.89 frames. ], batch size: 51, lr: 1.81e-02 +2024-08-06 15:15:11,858 INFO [trainer.py:765] (4/8) Epoch 4, batch 2100, train_loss[loss=3.593, NarTop10Accuracy=0.5992, over 4773.00 frames. ], tot_loss[loss=3.812, NarTop10Accuracy=0.562, over 5982.67 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (4/8) Epoch 4, batch 2200, train_loss[loss=3.774, NarTop10Accuracy=0.5774, over 7401.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.564, over 6016.10 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,089 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
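The per-batch and validation entries above follow a fixed wording, so they can be turned into loss/accuracy curves with a short parser. A minimal sketch, assuming the format stays exactly as logged here by trainer.py; the path "log-train.txt", the helper name parse_log, and the leading-"+" stripping (only needed when reading the patch itself rather than the plain log file) are illustrative, not part of the training code.

import re

BATCH_RE = re.compile(
    r"Epoch (\d+), batch (\d+), "
    r"train_loss\[loss=([\d.]+), NarTop10Accuracy=([\d.]+), over ([\d.]+) frames\. \], "
    r"tot_loss\[loss=([\d.]+), NarTop10Accuracy=([\d.]+), over ([\d.]+) frames\. \], "
    r"batch size: (\d+), lr: ([\d.eE+-]+)"
)
VALID_RE = re.compile(r"Epoch (\d+), validation: loss=([\d.]+), NarTop10Accuracy=([\d.]+)")

def parse_log(path):
    """Return per-batch tuples (epoch, batch, tot_loss, tot_acc, lr) and validation tuples (epoch, loss, acc)."""
    train, valid = [], []
    with open(path) as f:
        for line in f:
            line = line.lstrip("+")  # drop the diff prefix if parsing the patch itself
            m = BATCH_RE.search(line)
            if m:
                train.append((int(m[1]), int(m[2]), float(m[6]), float(m[7]), float(m[10])))
                continue
            m = VALID_RE.search(line)
            if m:
                valid.append((int(m[1]), float(m[2]), float(m[3])))
    return train, valid

if __name__ == "__main__":
    train, valid = parse_log("log-train.txt")  # hypothetical file name
    for epoch, loss, acc in valid:
        print(f"epoch {epoch}: validation loss {loss:.3f}, NarTop10Accuracy {acc:.3f}")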
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 15:16:03,741 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (4/8) Epoch 4, batch 2300, train_loss[loss=3.645, NarTop10Accuracy=0.5987, over 5703.00 frames. ], tot_loss[loss=3.814, NarTop10Accuracy=0.5618, over 6029.16 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (4/8) Epoch 4, batch 2400, train_loss[loss=3.313, NarTop10Accuracy=0.6544, over 5088.00 frames. ], tot_loss[loss=3.78, NarTop10Accuracy=0.5684, over 5783.26 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,535 INFO [trainer.py:765] (4/8) Epoch 4, batch 2500, train_loss[loss=3.431, NarTop10Accuracy=0.6503, over 5121.00 frames. ], tot_loss[loss=3.767, NarTop10Accuracy=0.5714, over 5489.80 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,371 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:18:24,100 INFO [trainer.py:765] (4/8) Epoch 5, batch 100, train_loss[loss=3.562, NarTop10Accuracy=0.6205, over 7200.00 frames. ], tot_loss[loss=3.778, NarTop10Accuracy=0.5692, over 2371.91 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (4/8) Epoch 5, batch 200, train_loss[loss=4.033, NarTop10Accuracy=0.5126, over 7221.00 frames. ], tot_loss[loss=3.759, NarTop10Accuracy=0.5733, over 3874.25 frames. ], batch size: 18, lr: 1.65e-02 +2024-08-06 15:19:32,888 INFO [trainer.py:765] (4/8) Epoch 5, batch 300, train_loss[loss=3.964, NarTop10Accuracy=0.5257, over 6888.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5811, over 4675.45 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (4/8) Epoch 5, batch 400, train_loss[loss=3.521, NarTop10Accuracy=0.621, over 5163.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.581, over 5123.18 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,299 INFO [trainer.py:765] (4/8) Epoch 5, batch 500, train_loss[loss=3.909, NarTop10Accuracy=0.5439, over 6174.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5779, over 5382.99 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,711 INFO [trainer.py:765] (4/8) Epoch 5, batch 600, train_loss[loss=3.833, NarTop10Accuracy=0.5501, over 5688.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5805, over 5653.92 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (4/8) Epoch 5, batch 700, train_loss[loss=3.57, NarTop10Accuracy=0.6122, over 4212.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5815, over 5700.66 frames. ], batch size: 5, lr: 1.62e-02 +2024-08-06 15:22:24,498 INFO [trainer.py:765] (4/8) Epoch 5, batch 800, train_loss[loss=3.965, NarTop10Accuracy=0.5297, over 5022.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5835, over 5746.15 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (4/8) Epoch 5, batch 900, train_loss[loss=3.512, NarTop10Accuracy=0.6159, over 6609.00 frames. ], tot_loss[loss=3.696, NarTop10Accuracy=0.5856, over 5775.64 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (4/8) Epoch 5, batch 1000, train_loss[loss=3.532, NarTop10Accuracy=0.6177, over 6321.00 frames. ], tot_loss[loss=3.68, NarTop10Accuracy=0.5886, over 5886.91 frames. 
], batch size: 13, lr: 1.60e-02 +2024-08-06 15:24:09,572 INFO [trainer.py:765] (4/8) Epoch 5, batch 1100, train_loss[loss=3.539, NarTop10Accuracy=0.6228, over 6765.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.589, over 5931.39 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,529 INFO [trainer.py:765] (4/8) Epoch 5, batch 1200, train_loss[loss=3.527, NarTop10Accuracy=0.6216, over 7326.00 frames. ], tot_loss[loss=3.685, NarTop10Accuracy=0.5881, over 5933.73 frames. ], batch size: 32, lr: 1.59e-02 +2024-08-06 15:25:19,380 INFO [trainer.py:765] (4/8) Epoch 5, batch 1300, train_loss[loss=3.979, NarTop10Accuracy=0.5253, over 4971.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.592, over 5989.61 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (4/8) Epoch 5, batch 1400, train_loss[loss=4.036, NarTop10Accuracy=0.5187, over 6027.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5902, over 6015.46 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (4/8) Epoch 5, batch 1500, train_loss[loss=3.618, NarTop10Accuracy=0.6069, over 6102.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5911, over 5950.45 frames. ], batch size: 50, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (4/8) Epoch 5, batch 1600, train_loss[loss=3.356, NarTop10Accuracy=0.6533, over 7068.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.589, over 5923.98 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,603 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (4/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (4/8) Epoch 5, batch 1700, train_loss[loss=3.677, NarTop10Accuracy=0.5881, over 6720.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5908, over 5934.18 frames. ], batch size: 14, lr: 1.56e-02 +2024-08-06 15:27:55,652 INFO [trainer.py:765] (4/8) Epoch 5, batch 1800, train_loss[loss=3.956, NarTop10Accuracy=0.5262, over 7311.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5898, over 5978.16 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (4/8) Epoch 5, batch 1900, train_loss[loss=3.709, NarTop10Accuracy=0.589, over 5844.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.589, over 6018.65 frames. ], batch size: 52, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (4/8) Epoch 5, batch 2000, train_loss[loss=3.569, NarTop10Accuracy=0.6195, over 6192.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5893, over 5996.44 frames. ], batch size: 51, lr: 1.55e-02 +2024-08-06 15:29:13,769 INFO [trainer.py:765] (4/8) Epoch 5, batch 2100, train_loss[loss=3.383, NarTop10Accuracy=0.6587, over 3798.00 frames. ], tot_loss[loss=3.689, NarTop10Accuracy=0.5868, over 5973.32 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (4/8) Epoch 5, batch 2200, train_loss[loss=4.057, NarTop10Accuracy=0.5108, over 7401.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5916, over 6007.58 frames. 
], batch size: 32, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (4/8) Epoch 5, batch 2300, train_loss[loss=3.246, NarTop10Accuracy=0.6714, over 5784.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5909, over 6014.75 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (4/8) Epoch 5, batch 2400, train_loss[loss=3.383, NarTop10Accuracy=0.6529, over 5055.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.5958, over 5773.10 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (4/8) Epoch 5, batch 2500, train_loss[loss=3.404, NarTop10Accuracy=0.6454, over 5151.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.603, over 5465.51 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,414 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (4/8) Epoch 6, batch 100, train_loss[loss=3.405, NarTop10Accuracy=0.6416, over 7386.00 frames. ], tot_loss[loss=3.626, NarTop10Accuracy=0.6004, over 2386.72 frames. ], batch size: 32, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (4/8) Epoch 6, batch 200, train_loss[loss=4.047, NarTop10Accuracy=0.5003, over 6789.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.6043, over 3859.23 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,243 INFO [trainer.py:765] (4/8) Epoch 6, batch 300, train_loss[loss=3.503, NarTop10Accuracy=0.6318, over 7152.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.605, over 4654.32 frames. ], batch size: 22, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (4/8) Epoch 6, batch 400, train_loss[loss=3.453, NarTop10Accuracy=0.6265, over 5214.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.6072, over 5089.09 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (4/8) Epoch 6, batch 500, train_loss[loss=3.394, NarTop10Accuracy=0.6526, over 6039.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6103, over 5374.47 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (4/8) Epoch 6, batch 600, train_loss[loss=3.298, NarTop10Accuracy=0.6721, over 5883.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6091, over 5638.02 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,735 INFO [trainer.py:765] (4/8) Epoch 6, batch 700, train_loss[loss=3.591, NarTop10Accuracy=0.6136, over 5121.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6099, over 5698.61 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (4/8) Epoch 6, batch 800, train_loss[loss=3.809, NarTop10Accuracy=0.5489, over 5283.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.6072, over 5763.84 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,385 INFO [trainer.py:765] (4/8) Epoch 6, batch 900, train_loss[loss=4.068, NarTop10Accuracy=0.5072, over 6291.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6101, over 5779.05 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (4/8) Epoch 6, batch 1000, train_loss[loss=3.266, NarTop10Accuracy=0.6727, over 6579.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.6072, over 5866.73 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (4/8) Epoch 6, batch 1100, train_loss[loss=3.536, NarTop10Accuracy=0.622, over 6768.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.6064, over 5913.92 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,828 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (4/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27356MB +2024-08-06 15:38:04,965 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,169 INFO [trainer.py:765] (4/8) Epoch 6, batch 1200, train_loss[loss=3.43, NarTop10Accuracy=0.6318, over 7320.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6092, over 5923.93 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (4/8) Epoch 6, batch 1300, train_loss[loss=3.323, NarTop10Accuracy=0.6618, over 4398.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6101, over 5989.65 frames. ], batch size: 5, lr: 1.37e-02 +2024-08-06 15:39:44,070 INFO [trainer.py:765] (4/8) Epoch 6, batch 1400, train_loss[loss=3.471, NarTop10Accuracy=0.6306, over 6135.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6109, over 6025.10 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (4/8) Epoch 6, batch 1500, train_loss[loss=3.963, NarTop10Accuracy=0.5223, over 6033.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6104, over 5955.89 frames. ], batch size: 50, lr: 1.36e-02 +2024-08-06 15:40:43,106 INFO [trainer.py:765] (4/8) Epoch 6, batch 1600, train_loss[loss=3.409, NarTop10Accuracy=0.6413, over 7359.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6113, over 5930.43 frames. ], batch size: 23, lr: 1.35e-02 +2024-08-06 15:41:09,789 INFO [trainer.py:765] (4/8) Epoch 6, batch 1700, train_loss[loss=3.395, NarTop10Accuracy=0.65, over 6255.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6142, over 5928.90 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (4/8) Epoch 6, batch 1800, train_loss[loss=3.367, NarTop10Accuracy=0.6516, over 7140.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6123, over 5983.66 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (4/8) Epoch 6, batch 1900, train_loss[loss=3.833, NarTop10Accuracy=0.5565, over 6012.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6083, over 6026.36 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (4/8) Epoch 6, batch 2000, train_loss[loss=3.526, NarTop10Accuracy=0.6214, over 5970.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6108, over 6005.95 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,668 INFO [trainer.py:765] (4/8) Epoch 6, batch 2100, train_loss[loss=3.385, NarTop10Accuracy=0.6564, over 4812.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6132, over 5974.84 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (4/8) Epoch 6, batch 2200, train_loss[loss=3.667, NarTop10Accuracy=0.5933, over 7131.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6116, over 6017.77 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,105 INFO [trainer.py:765] (4/8) Epoch 6, batch 2300, train_loss[loss=3.224, NarTop10Accuracy=0.6811, over 5682.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6107, over 6024.96 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (4/8) Epoch 6, batch 2400, train_loss[loss=3.364, NarTop10Accuracy=0.6586, over 5031.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6167, over 5776.84 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (4/8) Epoch 6, batch 2500, train_loss[loss=3.39, NarTop10Accuracy=0.6428, over 5307.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6206, over 5479.01 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,677 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:45:58,043 INFO [trainer.py:765] (4/8) Epoch 7, batch 100, train_loss[loss=3.302, NarTop10Accuracy=0.6689, over 7098.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6189, over 2369.42 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (4/8) Epoch 7, batch 200, train_loss[loss=3.454, NarTop10Accuracy=0.6284, over 6894.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6191, over 3864.21 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (4/8) Epoch 7, batch 300, train_loss[loss=3.745, NarTop10Accuracy=0.5787, over 7056.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6172, over 4659.61 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,496 INFO [trainer.py:765] (4/8) Epoch 7, batch 400, train_loss[loss=3.526, NarTop10Accuracy=0.6278, over 5037.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6191, over 5103.77 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,730 INFO [trainer.py:765] (4/8) Epoch 7, batch 500, train_loss[loss=3.589, NarTop10Accuracy=0.6064, over 6093.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6201, over 5389.87 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,370 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27360MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,720 INFO [trainer.py:765] (4/8) Epoch 7, batch 600, train_loss[loss=3.163, NarTop10Accuracy=0.6997, over 5622.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6188, over 5647.99 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,912 INFO [trainer.py:765] (4/8) Epoch 7, batch 700, train_loss[loss=3.813, NarTop10Accuracy=0.5544, over 5055.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6226, over 5718.13 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,381 INFO [trainer.py:765] (4/8) Epoch 7, batch 800, train_loss[loss=3.242, NarTop10Accuracy=0.685, over 4950.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6255, over 5787.78 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,549 INFO [trainer.py:765] (4/8) Epoch 7, batch 900, train_loss[loss=3.236, NarTop10Accuracy=0.6882, over 6261.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6272, over 5805.94 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,155 INFO [trainer.py:765] (4/8) Epoch 7, batch 1000, train_loss[loss=3.243, NarTop10Accuracy=0.6864, over 6252.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6274, over 5929.27 frames. 
], batch size: 13, lr: 1.20e-02 +2024-08-06 15:51:51,758 INFO [trainer.py:765] (4/8) Epoch 7, batch 1100, train_loss[loss=3.343, NarTop10Accuracy=0.6605, over 6816.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6264, over 5943.36 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,699 INFO [trainer.py:765] (4/8) Epoch 7, batch 1200, train_loss[loss=3.318, NarTop10Accuracy=0.6643, over 7389.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6281, over 5920.44 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,008 INFO [trainer.py:765] (4/8) Epoch 7, batch 1300, train_loss[loss=3.427, NarTop10Accuracy=0.6299, over 4902.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6269, over 5993.35 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,842 INFO [trainer.py:765] (4/8) Epoch 7, batch 1400, train_loss[loss=3.328, NarTop10Accuracy=0.6573, over 6117.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6258, over 5998.92 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (4/8) Epoch 7, batch 1500, train_loss[loss=3.73, NarTop10Accuracy=0.5772, over 5700.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6297, over 5949.36 frames. ], batch size: 50, lr: 1.19e-02 +2024-08-06 15:54:32,385 INFO [trainer.py:765] (4/8) Epoch 7, batch 1600, train_loss[loss=3.723, NarTop10Accuracy=0.5778, over 7029.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6295, over 5930.55 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,055 INFO [trainer.py:765] (4/8) Epoch 7, batch 1700, train_loss[loss=3.526, NarTop10Accuracy=0.6198, over 6612.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6258, over 5921.29 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 15:55:25,512 INFO [trainer.py:765] (4/8) Epoch 7, batch 1800, train_loss[loss=3.878, NarTop10Accuracy=0.5465, over 7113.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6263, over 5980.11 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,083 INFO [trainer.py:765] (4/8) Epoch 7, batch 1900, train_loss[loss=3.332, NarTop10Accuracy=0.6628, over 6033.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.622, over 6024.69 frames. ], batch size: 50, lr: 1.18e-02 +2024-08-06 15:56:17,592 INFO [trainer.py:765] (4/8) Epoch 7, batch 2000, train_loss[loss=3.721, NarTop10Accuracy=0.5771, over 5994.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6247, over 5997.76 frames. ], batch size: 50, lr: 1.17e-02 +2024-08-06 15:56:42,858 INFO [trainer.py:765] (4/8) Epoch 7, batch 2100, train_loss[loss=3.533, NarTop10Accuracy=0.5997, over 3960.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6285, over 5976.95 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,079 INFO [trainer.py:765] (4/8) Epoch 7, batch 2200, train_loss[loss=3.483, NarTop10Accuracy=0.6271, over 7224.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6242, over 6011.70 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,178 INFO [trainer.py:765] (4/8) Epoch 7, batch 2300, train_loss[loss=3.352, NarTop10Accuracy=0.6582, over 5577.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6228, over 6022.51 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,619 INFO [trainer.py:765] (4/8) Epoch 7, batch 2400, train_loss[loss=3.189, NarTop10Accuracy=0.6828, over 5229.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6271, over 5773.06 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,090 INFO [trainer.py:765] (4/8) Epoch 7, batch 2500, train_loss[loss=3.557, NarTop10Accuracy=0.6066, over 5730.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6315, over 5479.67 frames. ], batch size: 8, lr: 1.16e-02 +2024-08-06 15:58:31,565 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27360MB +2024-08-06 15:58:40,221 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:49,111 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:59:52,877 INFO [trainer.py:765] (4/8) Epoch 8, batch 100, train_loss[loss=3.608, NarTop10Accuracy=0.5939, over 7539.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6358, over 2360.25 frames. ], batch size: 32, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (4/8) Epoch 8, batch 200, train_loss[loss=3.348, NarTop10Accuracy=0.6592, over 6516.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6302, over 3852.35 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,563 INFO [trainer.py:765] (4/8) Epoch 8, batch 300, train_loss[loss=3.295, NarTop10Accuracy=0.6712, over 7278.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6335, over 4659.40 frames. ], batch size: 23, lr: 1.08e-02 +2024-08-06 16:01:29,760 INFO [trainer.py:765] (4/8) Epoch 8, batch 400, train_loss[loss=3.597, NarTop10Accuracy=0.5971, over 5214.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6313, over 5122.70 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,066 INFO [trainer.py:765] (4/8) Epoch 8, batch 500, train_loss[loss=3.768, NarTop10Accuracy=0.5623, over 6168.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6337, over 5385.37 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (4/8) Epoch 8, batch 600, train_loss[loss=3.129, NarTop10Accuracy=0.7053, over 5760.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6303, over 5642.61 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,500 INFO [trainer.py:765] (4/8) Epoch 8, batch 700, train_loss[loss=3.667, NarTop10Accuracy=0.5858, over 4266.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.629, over 5702.94 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (4/8) Epoch 8, batch 800, train_loss[loss=3.336, NarTop10Accuracy=0.6514, over 5154.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6318, over 5768.77 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:04:27,588 INFO [trainer.py:765] (4/8) Epoch 8, batch 900, train_loss[loss=3.365, NarTop10Accuracy=0.6597, over 6528.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6345, over 5812.51 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (4/8) Epoch 8, batch 1000, train_loss[loss=3.683, NarTop10Accuracy=0.5814, over 6645.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6369, over 5902.06 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:05:37,294 INFO [trainer.py:765] (4/8) Epoch 8, batch 1100, train_loss[loss=3.667, NarTop10Accuracy=0.5928, over 6807.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6377, over 5961.87 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,858 INFO [trainer.py:765] (4/8) Epoch 8, batch 1200, train_loss[loss=3.492, NarTop10Accuracy=0.6243, over 7110.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6356, over 5949.20 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (4/8) Epoch 8, batch 1300, train_loss[loss=3.046, NarTop10Accuracy=0.7258, over 4989.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6376, over 6020.72 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 16:07:24,235 INFO [trainer.py:765] (4/8) Epoch 8, batch 1400, train_loss[loss=3.48, NarTop10Accuracy=0.6204, over 6129.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6371, over 6046.38 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,169 INFO [trainer.py:765] (4/8) Epoch 8, batch 1500, train_loss[loss=3.446, NarTop10Accuracy=0.64, over 5610.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6414, over 5981.61 frames. ], batch size: 51, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (4/8) Epoch 8, batch 1600, train_loss[loss=3.142, NarTop10Accuracy=0.6966, over 7089.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6421, over 5949.01 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,617 INFO [trainer.py:765] (4/8) Epoch 8, batch 1700, train_loss[loss=3.275, NarTop10Accuracy=0.6789, over 6354.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6413, over 5941.09 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 16:09:13,106 INFO [trainer.py:765] (4/8) Epoch 8, batch 1800, train_loss[loss=3.279, NarTop10Accuracy=0.6754, over 7173.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6424, over 5987.29 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,636 INFO [trainer.py:765] (4/8) Epoch 8, batch 1900, train_loss[loss=3.818, NarTop10Accuracy=0.5566, over 6060.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6448, over 6032.80 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:09:56,940 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (4/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27360MB +2024-08-06 16:10:05,469 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,202 INFO [trainer.py:765] (4/8) Epoch 8, batch 2000, train_loss[loss=3.933, NarTop10Accuracy=0.535, over 6426.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.643, over 6001.70 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,513 INFO [trainer.py:765] (4/8) Epoch 8, batch 2100, train_loss[loss=3.379, NarTop10Accuracy=0.6465, over 4743.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6443, over 5971.79 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 16:11:03,746 INFO [trainer.py:765] (4/8) Epoch 8, batch 2200, train_loss[loss=3.579, NarTop10Accuracy=0.6122, over 7386.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6419, over 6010.62 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,903 INFO [trainer.py:765] (4/8) Epoch 8, batch 2300, train_loss[loss=3.611, NarTop10Accuracy=0.6017, over 5751.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6385, over 6015.58 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,091 INFO [trainer.py:765] (4/8) Epoch 8, batch 2400, train_loss[loss=3.578, NarTop10Accuracy=0.6012, over 5319.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6408, over 5756.32 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,443 INFO [trainer.py:765] (4/8) Epoch 8, batch 2500, train_loss[loss=3.224, NarTop10Accuracy=0.6737, over 5007.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6417, over 5453.29 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,389 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 16:13:37,514 INFO [trainer.py:765] (4/8) Epoch 9, batch 100, train_loss[loss=3.26, NarTop10Accuracy=0.6813, over 7260.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6521, over 2365.02 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,440 INFO [trainer.py:765] (4/8) Epoch 9, batch 200, train_loss[loss=3.649, NarTop10Accuracy=0.5968, over 6801.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6536, over 3838.75 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,507 INFO [trainer.py:765] (4/8) Epoch 9, batch 300, train_loss[loss=3.378, NarTop10Accuracy=0.6491, over 7074.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6506, over 4641.76 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,914 INFO [trainer.py:765] (4/8) Epoch 9, batch 400, train_loss[loss=3.22, NarTop10Accuracy=0.6892, over 5211.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6539, over 5091.87 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,336 INFO [trainer.py:765] (4/8) Epoch 9, batch 500, train_loss[loss=3.362, NarTop10Accuracy=0.6591, over 6057.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6569, over 5387.15 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,972 INFO [trainer.py:765] (4/8) Epoch 9, batch 600, train_loss[loss=3.592, NarTop10Accuracy=0.6058, over 5691.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6574, over 5652.09 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,145 INFO [trainer.py:765] (4/8) Epoch 9, batch 700, train_loss[loss=3.293, NarTop10Accuracy=0.6705, over 4230.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6547, over 5695.00 frames. ], batch size: 5, lr: 9.59e-03 +2024-08-06 16:17:32,052 INFO [trainer.py:765] (4/8) Epoch 9, batch 800, train_loss[loss=3.211, NarTop10Accuracy=0.6788, over 5052.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6475, over 5757.28 frames. ], batch size: 6, lr: 9.57e-03 +2024-08-06 16:18:07,816 INFO [trainer.py:765] (4/8) Epoch 9, batch 900, train_loss[loss=3.16, NarTop10Accuracy=0.7036, over 6189.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6492, over 5799.33 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,345 INFO [trainer.py:765] (4/8) Epoch 9, batch 1000, train_loss[loss=3.084, NarTop10Accuracy=0.706, over 6255.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6473, over 5906.37 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,382 INFO [trainer.py:765] (4/8) Epoch 9, batch 1100, train_loss[loss=3.382, NarTop10Accuracy=0.6534, over 6846.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6466, over 5945.57 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,877 INFO [trainer.py:765] (4/8) Epoch 9, batch 1200, train_loss[loss=3.756, NarTop10Accuracy=0.564, over 7413.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6451, over 5961.66 frames. 
], batch size: 32, lr: 9.48e-03 +2024-08-06 16:20:24,906 INFO [trainer.py:765] (4/8) Epoch 9, batch 1300, train_loss[loss=3.098, NarTop10Accuracy=0.7073, over 4299.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6455, over 6020.70 frames. ], batch size: 5, lr: 9.46e-03 +2024-08-06 16:20:56,580 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (4/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27360MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,691 INFO [trainer.py:765] (4/8) Epoch 9, batch 1400, train_loss[loss=3.685, NarTop10Accuracy=0.5834, over 6177.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.643, over 6021.58 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,897 INFO [trainer.py:765] (4/8) Epoch 9, batch 1500, train_loss[loss=3.371, NarTop10Accuracy=0.6492, over 6327.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6477, over 5953.27 frames. ], batch size: 51, lr: 9.42e-03 +2024-08-06 16:22:06,721 INFO [trainer.py:765] (4/8) Epoch 9, batch 1600, train_loss[loss=3.284, NarTop10Accuracy=0.6712, over 7152.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6503, over 5929.63 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (4/8) Epoch 9, batch 1700, train_loss[loss=3.533, NarTop10Accuracy=0.6268, over 6123.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6473, over 5908.94 frames. ], batch size: 13, lr: 9.38e-03 +2024-08-06 16:23:00,063 INFO [trainer.py:765] (4/8) Epoch 9, batch 1800, train_loss[loss=3.19, NarTop10Accuracy=0.6883, over 7350.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6506, over 5980.15 frames. ], batch size: 23, lr: 9.36e-03 +2024-08-06 16:23:26,782 INFO [trainer.py:765] (4/8) Epoch 9, batch 1900, train_loss[loss=3.494, NarTop10Accuracy=0.6373, over 6183.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6485, over 6030.45 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,485 INFO [trainer.py:765] (4/8) Epoch 9, batch 2000, train_loss[loss=3.976, NarTop10Accuracy=0.5262, over 5928.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6485, over 5997.29 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,962 INFO [trainer.py:765] (4/8) Epoch 9, batch 2100, train_loss[loss=2.885, NarTop10Accuracy=0.7434, over 3963.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6487, over 5981.65 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (4/8) Epoch 9, batch 2200, train_loss[loss=3.751, NarTop10Accuracy=0.576, over 7326.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6462, over 6015.27 frames. ], batch size: 32, lr: 9.28e-03 +2024-08-06 16:25:08,720 INFO [trainer.py:765] (4/8) Epoch 9, batch 2300, train_loss[loss=3.36, NarTop10Accuracy=0.6486, over 5706.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6437, over 6013.74 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,164 INFO [trainer.py:765] (4/8) Epoch 9, batch 2400, train_loss[loss=3.195, NarTop10Accuracy=0.6859, over 5118.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6444, over 5762.98 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,768 INFO [trainer.py:765] (4/8) Epoch 9, batch 2500, train_loss[loss=3.033, NarTop10Accuracy=0.7284, over 5091.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.652, over 5463.80 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,448 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 16:27:19,583 INFO [trainer.py:765] (4/8) Epoch 10, batch 100, train_loss[loss=3.274, NarTop10Accuracy=0.6762, over 7248.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6545, over 2364.57 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,627 INFO [trainer.py:765] (4/8) Epoch 10, batch 200, train_loss[loss=3.159, NarTop10Accuracy=0.6931, over 6849.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6565, over 3851.29 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (4/8) Epoch 10, batch 300, train_loss[loss=3.138, NarTop10Accuracy=0.6978, over 7173.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6559, over 4655.95 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,200 INFO [trainer.py:765] (4/8) Epoch 10, batch 400, train_loss[loss=3.14, NarTop10Accuracy=0.6962, over 5199.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6573, over 5095.18 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,217 INFO [trainer.py:765] (4/8) Epoch 10, batch 500, train_loss[loss=3.018, NarTop10Accuracy=0.7181, over 6111.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6591, over 5382.80 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (4/8) Epoch 10, batch 600, train_loss[loss=3.515, NarTop10Accuracy=0.6181, over 5859.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6559, over 5641.27 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,264 INFO [trainer.py:765] (4/8) Epoch 10, batch 700, train_loss[loss=3.394, NarTop10Accuracy=0.6501, over 5268.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.655, over 5707.09 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,842 INFO [trainer.py:765] (4/8) Epoch 10, batch 800, train_loss[loss=3.52, NarTop10Accuracy=0.6095, over 5091.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6541, over 5778.37 frames. ], batch size: 6, lr: 8.64e-03 +2024-08-06 16:31:16,257 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (4/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (4/8) Epoch 10, batch 900, train_loss[loss=3.236, NarTop10Accuracy=0.6742, over 6612.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6589, over 5809.42 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (4/8) Epoch 10, batch 1000, train_loss[loss=3.08, NarTop10Accuracy=0.7201, over 6165.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6576, over 5913.22 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (4/8) Epoch 10, batch 1100, train_loss[loss=3.086, NarTop10Accuracy=0.7075, over 6756.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6563, over 5936.98 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (4/8) Epoch 10, batch 1200, train_loss[loss=3.236, NarTop10Accuracy=0.6684, over 7071.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6584, over 5930.66 frames. ], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,170 INFO [trainer.py:765] (4/8) Epoch 10, batch 1300, train_loss[loss=3.128, NarTop10Accuracy=0.7012, over 5175.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6571, over 6019.15 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,201 INFO [trainer.py:765] (4/8) Epoch 10, batch 1400, train_loss[loss=3.442, NarTop10Accuracy=0.6376, over 6297.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6521, over 6044.74 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (4/8) Epoch 10, batch 1500, train_loss[loss=3.599, NarTop10Accuracy=0.6024, over 6129.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6559, over 5964.94 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,137 INFO [trainer.py:765] (4/8) Epoch 10, batch 1600, train_loss[loss=3.667, NarTop10Accuracy=0.5881, over 7062.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6592, over 5921.30 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (4/8) Epoch 10, batch 1700, train_loss[loss=3.392, NarTop10Accuracy=0.646, over 6639.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6577, over 5912.08 frames. ], batch size: 14, lr: 8.49e-03 +2024-08-06 16:36:43,648 INFO [trainer.py:765] (4/8) Epoch 10, batch 1800, train_loss[loss=3.248, NarTop10Accuracy=0.6774, over 7161.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6603, over 5971.53 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (4/8) Epoch 10, batch 1900, train_loss[loss=3.306, NarTop10Accuracy=0.6713, over 6192.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6602, over 6018.67 frames. ], batch size: 52, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (4/8) Epoch 10, batch 2000, train_loss[loss=3.248, NarTop10Accuracy=0.6819, over 6879.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6611, over 5993.33 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (4/8) Epoch 10, batch 2100, train_loss[loss=3.326, NarTop10Accuracy=0.6524, over 3909.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6581, over 5978.90 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (4/8) Epoch 10, batch 2200, train_loss[loss=3.812, NarTop10Accuracy=0.5626, over 7455.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.657, over 6009.80 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (4/8) Epoch 10, batch 2300, train_loss[loss=3.212, NarTop10Accuracy=0.6822, over 5628.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6567, over 6020.22 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,006 INFO [trainer.py:765] (4/8) Epoch 10, batch 2400, train_loss[loss=3.156, NarTop10Accuracy=0.6912, over 5199.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6624, over 5779.30 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,802 INFO [trainer.py:765] (4/8) Epoch 10, batch 2500, train_loss[loss=3.592, NarTop10Accuracy=0.6009, over 5238.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6672, over 5470.90 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,620 INFO [trainer.py:650] (4/8) Reaches end of dataloader. 
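In the optim.py:386 entries above, the reported clipping threshold lines up with Clipping_scale times the middle of the five logged grad-norm statistics (read here as min/25%/median/75%/max); that reading is inferred from the logged numbers themselves, not from documentation. A quick arithmetic check against three of the entries above:

# (clipping_scale, five logged grad-norm values, logged threshold)
entries = [
    (2.0, (1.411e2, 1.814e2, 1.981e2, 2.158e2, 5.862e2), 3.962e2),  # Epoch 8 entry above
    (2.0, (1.473e2, 1.808e2, 1.967e2, 2.142e2, 6.126e2), 3.935e2),  # Epoch 9 entry above
    (2.0, (1.434e2, 1.851e2, 2.012e2, 2.196e2, 4.599e2), 4.024e2),  # Epoch 10 entry above
]
for scale, stats, threshold in entries:
    median = stats[2]
    assert abs(scale * median - threshold) < 1.0  # agrees up to the logged precision
    print(f"{scale} * {median:.1f} = {scale * median:.1f} ~ logged threshold {threshold:.1f}")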
+2024-08-06 16:41:06,234 INFO [trainer.py:765] (4/8) Epoch 11, batch 100, train_loss[loss=3.661, NarTop10Accuracy=0.5899, over 7182.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.65, over 2369.52 frames. ], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,021 INFO [trainer.py:765] (4/8) Epoch 11, batch 200, train_loss[loss=3.563, NarTop10Accuracy=0.6075, over 6864.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6587, over 3851.77 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,189 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,974 INFO [trainer.py:765] (4/8) Epoch 11, batch 300, train_loss[loss=2.994, NarTop10Accuracy=0.7331, over 7140.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6657, over 4651.77 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,153 INFO [trainer.py:765] (4/8) Epoch 11, batch 400, train_loss[loss=3.435, NarTop10Accuracy=0.6376, over 5148.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6667, over 5093.41 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,718 INFO [trainer.py:765] (4/8) Epoch 11, batch 500, train_loss[loss=3.124, NarTop10Accuracy=0.6988, over 6051.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.669, over 5380.48 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,241 INFO [trainer.py:765] (4/8) Epoch 11, batch 600, train_loss[loss=3.499, NarTop10Accuracy=0.6359, over 5751.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.666, over 5646.10 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,715 INFO [trainer.py:765] (4/8) Epoch 11, batch 700, train_loss[loss=3.41, NarTop10Accuracy=0.6252, over 5235.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6673, over 5718.85 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,467 INFO [trainer.py:765] (4/8) Epoch 11, batch 800, train_loss[loss=2.929, NarTop10Accuracy=0.745, over 5178.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6635, over 5790.47 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,457 INFO [trainer.py:765] (4/8) Epoch 11, batch 900, train_loss[loss=3.728, NarTop10Accuracy=0.5812, over 6297.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.664, over 5823.55 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,309 INFO [trainer.py:765] (4/8) Epoch 11, batch 1000, train_loss[loss=3.328, NarTop10Accuracy=0.6506, over 6273.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6643, over 5928.39 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 16:46:53,457 INFO [trainer.py:765] (4/8) Epoch 11, batch 1100, train_loss[loss=3.125, NarTop10Accuracy=0.7089, over 7140.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6667, over 5949.22 frames. ], batch size: 18, lr: 7.82e-03 +2024-08-06 16:47:33,029 INFO [trainer.py:765] (4/8) Epoch 11, batch 1200, train_loss[loss=3.452, NarTop10Accuracy=0.6337, over 7434.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6651, over 5923.05 frames. 
], batch size: 33, lr: 7.81e-03 +2024-08-06 16:48:06,481 INFO [trainer.py:765] (4/8) Epoch 11, batch 1300, train_loss[loss=2.845, NarTop10Accuracy=0.7432, over 4299.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6636, over 5977.42 frames. ], batch size: 5, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (4/8) Epoch 11, batch 1400, train_loss[loss=3.614, NarTop10Accuracy=0.594, over 6051.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6582, over 5990.14 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,344 INFO [trainer.py:765] (4/8) Epoch 11, batch 1500, train_loss[loss=3.295, NarTop10Accuracy=0.6654, over 5799.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6589, over 5935.73 frames. ], batch size: 50, lr: 7.77e-03 +2024-08-06 16:49:37,102 INFO [trainer.py:765] (4/8) Epoch 11, batch 1600, train_loss[loss=3.311, NarTop10Accuracy=0.6613, over 7098.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6631, over 5897.57 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,791 INFO [trainer.py:765] (4/8) Epoch 11, batch 1700, train_loss[loss=3.411, NarTop10Accuracy=0.6398, over 6594.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.665, over 5900.04 frames. ], batch size: 14, lr: 7.74e-03 +2024-08-06 16:50:30,352 INFO [trainer.py:765] (4/8) Epoch 11, batch 1800, train_loss[loss=3.333, NarTop10Accuracy=0.6513, over 7035.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6608, over 5970.09 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (4/8) Epoch 11, batch 1900, train_loss[loss=3.792, NarTop10Accuracy=0.5626, over 5595.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.66, over 6015.78 frames. ], batch size: 50, lr: 7.71e-03 +2024-08-06 16:51:22,404 INFO [trainer.py:765] (4/8) Epoch 11, batch 2000, train_loss[loss=3.835, NarTop10Accuracy=0.5573, over 5982.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6617, over 5992.19 frames. ], batch size: 50, lr: 7.70e-03 +2024-08-06 16:51:47,793 INFO [trainer.py:765] (4/8) Epoch 11, batch 2100, train_loss[loss=3.036, NarTop10Accuracy=0.7207, over 3807.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6625, over 5968.70 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 16:52:13,117 INFO [trainer.py:765] (4/8) Epoch 11, batch 2200, train_loss[loss=3.298, NarTop10Accuracy=0.6622, over 7029.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6629, over 6008.66 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,898 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,080 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,444 INFO [trainer.py:765] (4/8) Epoch 11, batch 2300, train_loss[loss=3.197, NarTop10Accuracy=0.6923, over 5691.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6606, over 6015.51 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,886 INFO [trainer.py:765] (4/8) Epoch 11, batch 2400, train_loss[loss=3.427, NarTop10Accuracy=0.6447, over 5118.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6643, over 5777.54 frames. 
], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,371 INFO [trainer.py:765] (4/8) Epoch 11, batch 2500, train_loss[loss=3.611, NarTop10Accuracy=0.5951, over 5166.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6663, over 5480.54 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,150 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 16:54:58,524 INFO [trainer.py:765] (4/8) Epoch 12, batch 100, train_loss[loss=3.605, NarTop10Accuracy=0.5995, over 7194.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6656, over 2362.90 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,431 INFO [trainer.py:765] (4/8) Epoch 12, batch 200, train_loss[loss=3.145, NarTop10Accuracy=0.6986, over 6855.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6709, over 3849.18 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,095 INFO [trainer.py:765] (4/8) Epoch 12, batch 300, train_loss[loss=3.089, NarTop10Accuracy=0.7063, over 6996.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6762, over 4657.96 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,425 INFO [trainer.py:765] (4/8) Epoch 12, batch 400, train_loss[loss=3.164, NarTop10Accuracy=0.6936, over 5052.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6742, over 5118.92 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,502 INFO [trainer.py:765] (4/8) Epoch 12, batch 500, train_loss[loss=3.64, NarTop10Accuracy=0.5963, over 6174.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6713, over 5419.69 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,483 INFO [trainer.py:765] (4/8) Epoch 12, batch 600, train_loss[loss=2.874, NarTop10Accuracy=0.7543, over 5610.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6719, over 5667.99 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,004 INFO [trainer.py:765] (4/8) Epoch 12, batch 700, train_loss[loss=3.461, NarTop10Accuracy=0.626, over 4350.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6686, over 5729.80 frames. ], batch size: 5, lr: 7.22e-03 +2024-08-06 16:58:53,468 INFO [trainer.py:765] (4/8) Epoch 12, batch 800, train_loss[loss=3.303, NarTop10Accuracy=0.6664, over 4995.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6682, over 5770.36 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,205 INFO [trainer.py:765] (4/8) Epoch 12, batch 900, train_loss[loss=3.212, NarTop10Accuracy=0.6876, over 6150.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6724, over 5794.17 frames. ], batch size: 13, lr: 7.20e-03 +2024-08-06 17:00:01,573 INFO [trainer.py:765] (4/8) Epoch 12, batch 1000, train_loss[loss=3.007, NarTop10Accuracy=0.7182, over 6666.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6694, over 5890.75 frames. ], batch size: 14, lr: 7.19e-03 +2024-08-06 17:00:39,188 INFO [trainer.py:765] (4/8) Epoch 12, batch 1100, train_loss[loss=3.496, NarTop10Accuracy=0.63, over 7035.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6653, over 5916.01 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,963 INFO [trainer.py:765] (4/8) Epoch 12, batch 1200, train_loss[loss=3.184, NarTop10Accuracy=0.6957, over 7302.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6715, over 5921.75 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,107 INFO [trainer.py:765] (4/8) Epoch 12, batch 1300, train_loss[loss=3.398, NarTop10Accuracy=0.6379, over 4179.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6688, over 5980.03 frames. 
], batch size: 5, lr: 7.15e-03 +2024-08-06 17:02:22,322 INFO [trainer.py:765] (4/8) Epoch 12, batch 1400, train_loss[loss=3.649, NarTop10Accuracy=0.6013, over 6081.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6683, over 6018.24 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (4/8) Epoch 12, batch 1500, train_loss[loss=3.392, NarTop10Accuracy=0.6556, over 5757.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.673, over 5952.44 frames. ], batch size: 50, lr: 7.13e-03 +2024-08-06 17:03:20,690 INFO [trainer.py:765] (4/8) Epoch 12, batch 1600, train_loss[loss=3.228, NarTop10Accuracy=0.6778, over 7095.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6692, over 5933.14 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,296 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (4/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,603 INFO [trainer.py:765] (4/8) Epoch 12, batch 1700, train_loss[loss=3.432, NarTop10Accuracy=0.644, over 6117.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6683, over 5925.82 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,121 INFO [trainer.py:765] (4/8) Epoch 12, batch 1800, train_loss[loss=3.607, NarTop10Accuracy=0.6025, over 7110.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6695, over 5997.31 frames. ], batch size: 22, lr: 7.10e-03 +2024-08-06 17:04:48,591 INFO [trainer.py:765] (4/8) Epoch 12, batch 1900, train_loss[loss=3.275, NarTop10Accuracy=0.6757, over 6273.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6695, over 6043.43 frames. ], batch size: 50, lr: 7.08e-03 +2024-08-06 17:05:14,197 INFO [trainer.py:765] (4/8) Epoch 12, batch 2000, train_loss[loss=3.541, NarTop10Accuracy=0.6095, over 6198.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6715, over 6013.67 frames. ], batch size: 51, lr: 7.07e-03 +2024-08-06 17:05:39,467 INFO [trainer.py:765] (4/8) Epoch 12, batch 2100, train_loss[loss=3.402, NarTop10Accuracy=0.6477, over 4863.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6701, over 5979.23 frames. ], batch size: 5, lr: 7.06e-03 +2024-08-06 17:06:04,691 INFO [trainer.py:765] (4/8) Epoch 12, batch 2200, train_loss[loss=3.516, NarTop10Accuracy=0.6209, over 7209.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6674, over 6012.35 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,847 INFO [trainer.py:765] (4/8) Epoch 12, batch 2300, train_loss[loss=3.261, NarTop10Accuracy=0.6715, over 5763.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6679, over 6015.04 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,200 INFO [trainer.py:765] (4/8) Epoch 12, batch 2400, train_loss[loss=3.143, NarTop10Accuracy=0.6941, over 5100.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6698, over 5784.91 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,645 INFO [trainer.py:765] (4/8) Epoch 12, batch 2500, train_loss[loss=3.253, NarTop10Accuracy=0.6701, over 5115.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6749, over 5476.30 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,805 INFO [trainer.py:650] (4/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,079 INFO [trainer.py:765] (4/8) Epoch 13, batch 100, train_loss[loss=3.003, NarTop10Accuracy=0.7316, over 7326.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6663, over 2369.18 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,120 INFO [trainer.py:765] (4/8) Epoch 13, batch 200, train_loss[loss=3.007, NarTop10Accuracy=0.7242, over 6750.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6673, over 3868.52 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,277 INFO [trainer.py:765] (4/8) Epoch 13, batch 300, train_loss[loss=3.507, NarTop10Accuracy=0.6221, over 7257.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6736, over 4672.02 frames. ], batch size: 23, lr: 6.71e-03 +2024-08-06 17:10:19,164 INFO [trainer.py:765] (4/8) Epoch 13, batch 400, train_loss[loss=2.906, NarTop10Accuracy=0.7415, over 5085.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6771, over 5122.41 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,335 INFO [trainer.py:765] (4/8) Epoch 13, batch 500, train_loss[loss=3.207, NarTop10Accuracy=0.6817, over 5934.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6776, over 5410.04 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,245 INFO [trainer.py:765] (4/8) Epoch 13, batch 600, train_loss[loss=3, NarTop10Accuracy=0.7232, over 5748.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6783, over 5658.19 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (4/8) Epoch 13, batch 700, train_loss[loss=3.154, NarTop10Accuracy=0.6898, over 4941.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6774, over 5722.69 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,442 INFO [trainer.py:765] (4/8) Epoch 13, batch 800, train_loss[loss=2.949, NarTop10Accuracy=0.7427, over 4401.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6761, over 5782.43 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 17:13:10,031 INFO [trainer.py:765] (4/8) Epoch 13, batch 900, train_loss[loss=3.278, NarTop10Accuracy=0.6694, over 6741.00 frames. ], tot_loss[loss=3.244, NarTop10Accuracy=0.6774, over 5792.31 frames. ], batch size: 14, lr: 6.65e-03 +2024-08-06 17:13:41,443 INFO [trainer.py:765] (4/8) Epoch 13, batch 1000, train_loss[loss=3.543, NarTop10Accuracy=0.6139, over 6687.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6763, over 5892.48 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,537 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (4/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:14:24,470 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,697 INFO [trainer.py:765] (4/8) Epoch 13, batch 1100, train_loss[loss=3.426, NarTop10Accuracy=0.6408, over 6681.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6749, over 5920.68 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,475 INFO [trainer.py:765] (4/8) Epoch 13, batch 1200, train_loss[loss=3.47, NarTop10Accuracy=0.6322, over 7119.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6746, over 5918.74 frames. 
], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (4/8) Epoch 13, batch 1300, train_loss[loss=3.024, NarTop10Accuracy=0.7153, over 4311.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.6744, over 5995.26 frames. ], batch size: 5, lr: 6.61e-03 +2024-08-06 17:16:11,783 INFO [trainer.py:765] (4/8) Epoch 13, batch 1400, train_loss[loss=3.059, NarTop10Accuracy=0.713, over 6138.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6742, over 6005.76 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,788 INFO [trainer.py:765] (4/8) Epoch 13, batch 1500, train_loss[loss=3.583, NarTop10Accuracy=0.6047, over 5718.00 frames. ], tot_loss[loss=3.255, NarTop10Accuracy=0.675, over 5935.15 frames. ], batch size: 51, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (4/8) Epoch 13, batch 1600, train_loss[loss=3.039, NarTop10Accuracy=0.7277, over 7035.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6726, over 5929.74 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (4/8) Epoch 13, batch 1700, train_loss[loss=3.138, NarTop10Accuracy=0.6805, over 6636.00 frames. ], tot_loss[loss=3.262, NarTop10Accuracy=0.6737, over 5901.16 frames. ], batch size: 14, lr: 6.57e-03 +2024-08-06 17:18:00,762 INFO [trainer.py:765] (4/8) Epoch 13, batch 1800, train_loss[loss=3.086, NarTop10Accuracy=0.7071, over 6867.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6752, over 5980.53 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (4/8) Epoch 13, batch 1900, train_loss[loss=3.547, NarTop10Accuracy=0.6149, over 5835.00 frames. ], tot_loss[loss=3.251, NarTop10Accuracy=0.676, over 6033.26 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,777 INFO [trainer.py:765] (4/8) Epoch 13, batch 2000, train_loss[loss=3.574, NarTop10Accuracy=0.6157, over 6267.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.6787, over 6001.07 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (4/8) Epoch 13, batch 2100, train_loss[loss=2.968, NarTop10Accuracy=0.7355, over 3879.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6788, over 5958.88 frames. ], batch size: 4, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (4/8) Epoch 13, batch 2200, train_loss[loss=3.433, NarTop10Accuracy=0.6338, over 7071.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6768, over 5997.10 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,543 INFO [trainer.py:765] (4/8) Epoch 13, batch 2300, train_loss[loss=3.712, NarTop10Accuracy=0.575, over 5706.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6732, over 6007.78 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (4/8) Epoch 13, batch 2400, train_loss[loss=3.584, NarTop10Accuracy=0.6043, over 5247.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6788, over 5760.10 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,410 INFO [trainer.py:765] (4/8) Epoch 13, batch 2500, train_loss[loss=3.482, NarTop10Accuracy=0.6257, over 5235.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6816, over 5473.60 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,322 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (4/8) Epoch 14, batch 100, train_loss[loss=2.96, NarTop10Accuracy=0.735, over 7314.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6844, over 2392.00 frames. 
], batch size: 31, lr: 6.24e-03 +2024-08-06 17:22:50,377 INFO [trainer.py:765] (4/8) Epoch 14, batch 200, train_loss[loss=3.282, NarTop10Accuracy=0.6663, over 6843.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.6796, over 3864.48 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,879 INFO [trainer.py:765] (4/8) Epoch 14, batch 300, train_loss[loss=3.181, NarTop10Accuracy=0.6922, over 7065.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6849, over 4664.16 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,485 INFO [trainer.py:765] (4/8) Epoch 14, batch 400, train_loss[loss=3.003, NarTop10Accuracy=0.7231, over 5169.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6809, over 5110.03 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,113 INFO [trainer.py:765] (4/8) Epoch 14, batch 500, train_loss[loss=3.362, NarTop10Accuracy=0.6493, over 5994.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6793, over 5390.55 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (4/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,275 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:24:44,822 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,913 INFO [trainer.py:765] (4/8) Epoch 14, batch 600, train_loss[loss=2.879, NarTop10Accuracy=0.7499, over 5652.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6791, over 5644.01 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,547 INFO [trainer.py:765] (4/8) Epoch 14, batch 700, train_loss[loss=3.463, NarTop10Accuracy=0.6359, over 4311.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6809, over 5724.56 frames. ], batch size: 5, lr: 6.19e-03 +2024-08-06 17:26:25,278 INFO [trainer.py:765] (4/8) Epoch 14, batch 800, train_loss[loss=2.93, NarTop10Accuracy=0.741, over 4287.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6833, over 5780.08 frames. ], batch size: 5, lr: 6.18e-03 +2024-08-06 17:26:57,658 INFO [trainer.py:765] (4/8) Epoch 14, batch 900, train_loss[loss=3.283, NarTop10Accuracy=0.6646, over 6093.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6841, over 5801.50 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,715 INFO [trainer.py:765] (4/8) Epoch 14, batch 1000, train_loss[loss=3.49, NarTop10Accuracy=0.6236, over 6243.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.681, over 5915.19 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,596 INFO [trainer.py:765] (4/8) Epoch 14, batch 1100, train_loss[loss=3.023, NarTop10Accuracy=0.7261, over 6834.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6818, over 5946.71 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,732 INFO [trainer.py:765] (4/8) Epoch 14, batch 1200, train_loss[loss=3.466, NarTop10Accuracy=0.6328, over 7056.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6809, over 5907.99 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,213 INFO [trainer.py:765] (4/8) Epoch 14, batch 1300, train_loss[loss=3.473, NarTop10Accuracy=0.6219, over 5079.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6816, over 5974.44 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,601 INFO [trainer.py:765] (4/8) Epoch 14, batch 1400, train_loss[loss=3.296, NarTop10Accuracy=0.6698, over 6015.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6798, over 6004.61 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,314 INFO [trainer.py:765] (4/8) Epoch 14, batch 1500, train_loss[loss=3.804, NarTop10Accuracy=0.5648, over 6189.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6775, over 5952.03 frames. ], batch size: 50, lr: 6.12e-03 +2024-08-06 17:30:53,041 INFO [trainer.py:765] (4/8) Epoch 14, batch 1600, train_loss[loss=2.976, NarTop10Accuracy=0.7259, over 6987.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6795, over 5945.95 frames. ], batch size: 22, lr: 6.11e-03 +2024-08-06 17:31:19,727 INFO [trainer.py:765] (4/8) Epoch 14, batch 1700, train_loss[loss=3.061, NarTop10Accuracy=0.7205, over 6663.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6837, over 5940.04 frames. ], batch size: 14, lr: 6.10e-03 +2024-08-06 17:31:46,288 INFO [trainer.py:765] (4/8) Epoch 14, batch 1800, train_loss[loss=3.04, NarTop10Accuracy=0.7166, over 7041.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6879, over 5985.90 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,726 INFO [trainer.py:765] (4/8) Epoch 14, batch 1900, train_loss[loss=3.729, NarTop10Accuracy=0.5711, over 5985.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6857, over 6044.73 frames. ], batch size: 51, lr: 6.09e-03 +2024-08-06 17:32:38,281 INFO [trainer.py:765] (4/8) Epoch 14, batch 2000, train_loss[loss=3.257, NarTop10Accuracy=0.6796, over 6087.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6831, over 6027.79 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 17:33:03,645 INFO [trainer.py:765] (4/8) Epoch 14, batch 2100, train_loss[loss=2.974, NarTop10Accuracy=0.7372, over 4854.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6824, over 6000.53 frames. ], batch size: 5, lr: 6.07e-03 +2024-08-06 17:33:28,997 INFO [trainer.py:765] (4/8) Epoch 14, batch 2200, train_loss[loss=3.249, NarTop10Accuracy=0.6774, over 7470.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6823, over 6018.33 frames. ], batch size: 31, lr: 6.06e-03 +2024-08-06 17:33:54,085 INFO [trainer.py:765] (4/8) Epoch 14, batch 2300, train_loss[loss=2.834, NarTop10Accuracy=0.7621, over 5703.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6794, over 6021.89 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,533 INFO [trainer.py:765] (4/8) Epoch 14, batch 2400, train_loss[loss=2.869, NarTop10Accuracy=0.7596, over 5223.00 frames. ], tot_loss[loss=3.234, NarTop10Accuracy=0.6788, over 5761.86 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,115 INFO [trainer.py:765] (4/8) Epoch 14, batch 2500, train_loss[loss=2.956, NarTop10Accuracy=0.7338, over 5073.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6857, over 5479.95 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,394 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (4/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,645 INFO [trainer.py:650] (4/8) Reaches end of dataloader. 
+2024-08-06 17:36:11,738 INFO [trainer.py:765] (4/8) Epoch 15, batch 100, train_loss[loss=3.109, NarTop10Accuracy=0.7027, over 7182.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6836, over 2350.11 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (4/8) Epoch 15, batch 200, train_loss[loss=3.546, NarTop10Accuracy=0.6146, over 6660.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6882, over 3829.15 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (4/8) Epoch 15, batch 300, train_loss[loss=3.314, NarTop10Accuracy=0.667, over 6972.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6879, over 4658.30 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,904 INFO [trainer.py:765] (4/8) Epoch 15, batch 400, train_loss[loss=2.985, NarTop10Accuracy=0.7338, over 5790.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6891, over 5121.25 frames. ], batch size: 8, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (4/8) Epoch 15, batch 500, train_loss[loss=2.865, NarTop10Accuracy=0.7507, over 5985.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.689, over 5372.96 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,093 INFO [trainer.py:765] (4/8) Epoch 15, batch 600, train_loss[loss=2.899, NarTop10Accuracy=0.7491, over 5664.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.687, over 5623.54 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (4/8) Epoch 15, batch 700, train_loss[loss=2.89, NarTop10Accuracy=0.7521, over 5118.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6857, over 5723.01 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,564 INFO [trainer.py:765] (4/8) Epoch 15, batch 800, train_loss[loss=3.359, NarTop10Accuracy=0.6472, over 5040.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6801, over 5765.60 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,791 INFO [trainer.py:765] (4/8) Epoch 15, batch 900, train_loss[loss=3.354, NarTop10Accuracy=0.6533, over 6159.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6844, over 5793.28 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,251 INFO [trainer.py:765] (4/8) Epoch 15, batch 1000, train_loss[loss=3.041, NarTop10Accuracy=0.7115, over 6567.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6872, over 5896.83 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 17:41:46,452 INFO [trainer.py:765] (4/8) Epoch 15, batch 1100, train_loss[loss=3.078, NarTop10Accuracy=0.7015, over 6807.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6871, over 5945.70 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (4/8) Epoch 15, batch 1200, train_loss[loss=3.334, NarTop10Accuracy=0.6545, over 6978.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6799, over 5938.76 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,427 INFO [trainer.py:765] (4/8) Epoch 15, batch 1300, train_loss[loss=2.965, NarTop10Accuracy=0.7255, over 5157.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6834, over 6002.21 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (4/8) Epoch 15, batch 1400, train_loss[loss=3.471, NarTop10Accuracy=0.6253, over 6078.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6824, over 6016.80 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (4/8) Epoch 15, batch 1500, train_loss[loss=3.077, NarTop10Accuracy=0.709, over 6159.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.682, over 5961.03 frames. ], batch size: 50, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (4/8) Epoch 15, batch 1600, train_loss[loss=3.616, NarTop10Accuracy=0.5989, over 7356.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.686, over 5936.46 frames. ], batch size: 23, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (4/8) Epoch 15, batch 1700, train_loss[loss=3.015, NarTop10Accuracy=0.7106, over 6120.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6869, over 5918.77 frames. ], batch size: 13, lr: 5.70e-03 +2024-08-06 17:45:17,293 INFO [trainer.py:765] (4/8) Epoch 15, batch 1800, train_loss[loss=3.138, NarTop10Accuracy=0.6962, over 7518.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6883, over 5989.08 frames. ], batch size: 23, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (4/8) Epoch 15, batch 1900, train_loss[loss=3.186, NarTop10Accuracy=0.6899, over 6339.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6826, over 6031.20 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,540 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:46:01,743 INFO [trainer.py:811] (4/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:46:02,216 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,371 INFO [trainer.py:765] (4/8) Epoch 15, batch 2000, train_loss[loss=3.258, NarTop10Accuracy=0.6744, over 5493.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6857, over 6005.19 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (4/8) Epoch 15, batch 2100, train_loss[loss=3.194, NarTop10Accuracy=0.6856, over 4023.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6848, over 5971.54 frames. ], batch size: 4, lr: 5.67e-03 +2024-08-06 17:47:08,032 INFO [trainer.py:765] (4/8) Epoch 15, batch 2200, train_loss[loss=3.056, NarTop10Accuracy=0.7148, over 7140.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6851, over 6005.32 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (4/8) Epoch 15, batch 2300, train_loss[loss=3.515, NarTop10Accuracy=0.6157, over 5727.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6849, over 6024.94 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (4/8) Epoch 15, batch 2400, train_loss[loss=3.48, NarTop10Accuracy=0.6346, over 5070.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6887, over 5792.62 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,161 INFO [trainer.py:765] (4/8) Epoch 15, batch 2500, train_loss[loss=2.972, NarTop10Accuracy=0.7382, over 5082.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6918, over 5482.78 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:41,525 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 17:49:41,220 INFO [trainer.py:765] (4/8) Epoch 16, batch 100, train_loss[loss=3.469, NarTop10Accuracy=0.6372, over 7191.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6961, over 2359.93 frames. 
], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,156 INFO [trainer.py:765] (4/8) Epoch 16, batch 200, train_loss[loss=2.98, NarTop10Accuracy=0.7327, over 6840.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6845, over 3845.81 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,158 INFO [trainer.py:765] (4/8) Epoch 16, batch 300, train_loss[loss=3.127, NarTop10Accuracy=0.702, over 7092.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6859, over 4652.18 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,975 INFO [trainer.py:765] (4/8) Epoch 16, batch 400, train_loss[loss=3.433, NarTop10Accuracy=0.6259, over 5136.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6865, over 5111.50 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,322 INFO [trainer.py:765] (4/8) Epoch 16, batch 500, train_loss[loss=3.036, NarTop10Accuracy=0.7213, over 5973.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6884, over 5385.51 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,250 INFO [trainer.py:765] (4/8) Epoch 16, batch 600, train_loss[loss=3.005, NarTop10Accuracy=0.729, over 5733.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6864, over 5655.47 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,385 INFO [trainer.py:765] (4/8) Epoch 16, batch 700, train_loss[loss=2.857, NarTop10Accuracy=0.7487, over 4935.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6874, over 5732.71 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,814 INFO [trainer.py:765] (4/8) Epoch 16, batch 800, train_loss[loss=2.93, NarTop10Accuracy=0.7286, over 5232.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6896, over 5798.94 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,922 INFO [trainer.py:765] (4/8) Epoch 16, batch 900, train_loss[loss=3.519, NarTop10Accuracy=0.6184, over 6234.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6914, over 5813.10 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,606 INFO [trainer.py:765] (4/8) Epoch 16, batch 1000, train_loss[loss=2.989, NarTop10Accuracy=0.7289, over 6255.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6933, over 5892.64 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:55:17,195 INFO [trainer.py:765] (4/8) Epoch 16, batch 1100, train_loss[loss=3.203, NarTop10Accuracy=0.6821, over 6933.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6867, over 5934.88 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,208 INFO [trainer.py:765] (4/8) Epoch 16, batch 1200, train_loss[loss=3.491, NarTop10Accuracy=0.6208, over 7206.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6846, over 5912.06 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,774 INFO [trainer.py:765] (4/8) Epoch 16, batch 1300, train_loss[loss=3.245, NarTop10Accuracy=0.6746, over 4989.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.686, over 5985.80 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,647 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (4/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (4/8) Epoch 16, batch 1400, train_loss[loss=3.151, NarTop10Accuracy=0.6955, over 6183.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6879, over 6017.14 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,033 INFO [trainer.py:765] (4/8) Epoch 16, batch 1500, train_loss[loss=3.386, NarTop10Accuracy=0.6482, over 5556.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6887, over 5948.60 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 17:58:01,775 INFO [trainer.py:765] (4/8) Epoch 16, batch 1600, train_loss[loss=2.981, NarTop10Accuracy=0.7252, over 7095.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6893, over 5917.08 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (4/8) Epoch 16, batch 1700, train_loss[loss=2.97, NarTop10Accuracy=0.7414, over 6540.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.687, over 5912.40 frames. ], batch size: 14, lr: 5.34e-03 +2024-08-06 17:58:54,976 INFO [trainer.py:765] (4/8) Epoch 16, batch 1800, train_loss[loss=3.04, NarTop10Accuracy=0.714, over 7164.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6902, over 5981.10 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (4/8) Epoch 16, batch 1900, train_loss[loss=3.469, NarTop10Accuracy=0.6325, over 6132.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6849, over 6004.84 frames. ], batch size: 50, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (4/8) Epoch 16, batch 2000, train_loss[loss=3.091, NarTop10Accuracy=0.7086, over 6429.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6901, over 5985.90 frames. ], batch size: 52, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (4/8) Epoch 16, batch 2100, train_loss[loss=3.382, NarTop10Accuracy=0.6419, over 4926.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6851, over 5970.91 frames. ], batch size: 5, lr: 5.32e-03 +2024-08-06 18:00:37,334 INFO [trainer.py:765] (4/8) Epoch 16, batch 2200, train_loss[loss=3.201, NarTop10Accuracy=0.686, over 7395.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6812, over 5996.05 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (4/8) Epoch 16, batch 2300, train_loss[loss=2.957, NarTop10Accuracy=0.7312, over 5682.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6821, over 6012.96 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,884 INFO [trainer.py:765] (4/8) Epoch 16, batch 2400, train_loss[loss=2.958, NarTop10Accuracy=0.7378, over 5133.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6873, over 5769.63 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (4/8) Epoch 16, batch 2500, train_loss[loss=3.076, NarTop10Accuracy=0.7211, over 4974.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6919, over 5457.92 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,325 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 18:03:08,530 INFO [trainer.py:765] (4/8) Epoch 17, batch 100, train_loss[loss=3.182, NarTop10Accuracy=0.6836, over 7341.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.699, over 2359.24 frames. 
], batch size: 32, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (4/8) Epoch 17, batch 200, train_loss[loss=3.378, NarTop10Accuracy=0.6556, over 6774.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6946, over 3840.47 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,590 INFO [trainer.py:765] (4/8) Epoch 17, batch 300, train_loss[loss=3.318, NarTop10Accuracy=0.6642, over 7053.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.691, over 4644.19 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,401 INFO [trainer.py:765] (4/8) Epoch 17, batch 400, train_loss[loss=3.34, NarTop10Accuracy=0.6667, over 5130.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6919, over 5086.87 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (4/8) Epoch 17, batch 500, train_loss[loss=2.869, NarTop10Accuracy=0.7528, over 6042.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6947, over 5375.87 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (4/8) Epoch 17, batch 600, train_loss[loss=3.128, NarTop10Accuracy=0.7005, over 5661.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.692, over 5649.46 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,474 INFO [trainer.py:765] (4/8) Epoch 17, batch 700, train_loss[loss=3.099, NarTop10Accuracy=0.7032, over 4932.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6914, over 5722.54 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,723 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (4/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,763 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,353 INFO [trainer.py:765] (4/8) Epoch 17, batch 800, train_loss[loss=3.056, NarTop10Accuracy=0.7064, over 4974.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6895, over 5764.47 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (4/8) Epoch 17, batch 900, train_loss[loss=3.641, NarTop10Accuracy=0.5983, over 6834.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6937, over 5800.29 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (4/8) Epoch 17, batch 1000, train_loss[loss=3.228, NarTop10Accuracy=0.6782, over 6636.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6915, over 5918.32 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (4/8) Epoch 17, batch 1100, train_loss[loss=2.987, NarTop10Accuracy=0.7317, over 6861.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 5958.86 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (4/8) Epoch 17, batch 1200, train_loss[loss=3.137, NarTop10Accuracy=0.7013, over 7086.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6913, over 5945.42 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,689 INFO [trainer.py:765] (4/8) Epoch 17, batch 1300, train_loss[loss=3.157, NarTop10Accuracy=0.6878, over 5130.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.691, over 6016.86 frames. 
], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,027 INFO [trainer.py:765] (4/8) Epoch 17, batch 1400, train_loss[loss=3.293, NarTop10Accuracy=0.6625, over 6069.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6896, over 6040.31 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (4/8) Epoch 17, batch 1500, train_loss[loss=3.382, NarTop10Accuracy=0.6485, over 6426.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6908, over 5994.62 frames. ], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (4/8) Epoch 17, batch 1600, train_loss[loss=3.053, NarTop10Accuracy=0.7188, over 6915.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6936, over 5949.67 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,509 INFO [trainer.py:765] (4/8) Epoch 17, batch 1700, train_loss[loss=3.461, NarTop10Accuracy=0.6349, over 6135.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6899, over 5928.61 frames. ], batch size: 13, lr: 5.03e-03 +2024-08-06 18:12:40,002 INFO [trainer.py:765] (4/8) Epoch 17, batch 1800, train_loss[loss=3.003, NarTop10Accuracy=0.7269, over 7110.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6874, over 5984.00 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (4/8) Epoch 17, batch 1900, train_loss[loss=3.06, NarTop10Accuracy=0.7112, over 6234.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6864, over 6023.04 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (4/8) Epoch 17, batch 2000, train_loss[loss=3.642, NarTop10Accuracy=0.5937, over 6303.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6916, over 5999.45 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (4/8) Epoch 17, batch 2100, train_loss[loss=3.091, NarTop10Accuracy=0.7081, over 3918.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6895, over 5970.73 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,435 INFO [trainer.py:765] (4/8) Epoch 17, batch 2200, train_loss[loss=3.031, NarTop10Accuracy=0.7221, over 7464.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6863, over 5992.56 frames. ], batch size: 32, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (4/8) Epoch 17, batch 2300, train_loss[loss=2.974, NarTop10Accuracy=0.7284, over 5556.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6879, over 6012.82 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (4/8) Epoch 17, batch 2400, train_loss[loss=2.8, NarTop10Accuracy=0.7584, over 4938.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6892, over 5756.92 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (4/8) Epoch 17, batch 2500, train_loss[loss=2.918, NarTop10Accuracy=0.7484, over 4989.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6922, over 5464.80 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:54,872 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 18:16:49,907 INFO [trainer.py:765] (4/8) Epoch 18, batch 100, train_loss[loss=3.029, NarTop10Accuracy=0.7226, over 7365.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6901, over 2368.58 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (4/8) Epoch 18, batch 200, train_loss[loss=2.965, NarTop10Accuracy=0.7407, over 6648.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6936, over 3843.54 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,716 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 18:17:35,926 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 18:17:36,528 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (4/8) Epoch 18, batch 300, train_loss[loss=3.472, NarTop10Accuracy=0.6257, over 6885.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6917, over 4635.62 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (4/8) Epoch 18, batch 400, train_loss[loss=3.439, NarTop10Accuracy=0.6397, over 5064.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6942, over 5094.89 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (4/8) Epoch 18, batch 500, train_loss[loss=3.169, NarTop10Accuracy=0.6886, over 6048.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6941, over 5374.70 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (4/8) Epoch 18, batch 600, train_loss[loss=3.294, NarTop10Accuracy=0.6617, over 5679.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6968, over 5643.95 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,869 INFO [trainer.py:765] (4/8) Epoch 18, batch 700, train_loss[loss=3.568, NarTop10Accuracy=0.6091, over 5070.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6948, over 5713.82 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (4/8) Epoch 18, batch 800, train_loss[loss=2.775, NarTop10Accuracy=0.7717, over 4959.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.692, over 5776.07 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,408 INFO [trainer.py:765] (4/8) Epoch 18, batch 900, train_loss[loss=2.808, NarTop10Accuracy=0.7621, over 6369.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6963, over 5793.71 frames. ], batch size: 13, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (4/8) Epoch 18, batch 1000, train_loss[loss=2.96, NarTop10Accuracy=0.7429, over 6702.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6932, over 5903.53 frames. ], batch size: 14, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (4/8) Epoch 18, batch 1100, train_loss[loss=3.371, NarTop10Accuracy=0.65, over 6870.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6932, over 5936.97 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (4/8) Epoch 18, batch 1200, train_loss[loss=3.548, NarTop10Accuracy=0.6113, over 7047.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6901, over 5916.77 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (4/8) Epoch 18, batch 1300, train_loss[loss=2.829, NarTop10Accuracy=0.7589, over 4377.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6936, over 5976.56 frames. ], batch size: 5, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (4/8) Epoch 18, batch 1400, train_loss[loss=2.922, NarTop10Accuracy=0.739, over 5976.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6943, over 6012.04 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (4/8) Epoch 18, batch 1500, train_loss[loss=3.166, NarTop10Accuracy=0.6993, over 6912.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6959, over 5965.22 frames. ], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (4/8) Epoch 18, batch 1600, train_loss[loss=3.003, NarTop10Accuracy=0.7275, over 6993.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.693, over 5943.60 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,687 INFO [trainer.py:765] (4/8) Epoch 18, batch 1700, train_loss[loss=3.045, NarTop10Accuracy=0.7206, over 6225.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6932, over 5909.36 frames. ], batch size: 13, lr: 4.75e-03 +2024-08-06 18:26:21,196 INFO [trainer.py:765] (4/8) Epoch 18, batch 1800, train_loss[loss=3.471, NarTop10Accuracy=0.6277, over 7221.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6946, over 5971.48 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (4/8) Epoch 18, batch 1900, train_loss[loss=3.151, NarTop10Accuracy=0.7008, over 5955.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6936, over 6011.46 frames. ], batch size: 50, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (4/8) Epoch 18, batch 2000, train_loss[loss=3.123, NarTop10Accuracy=0.71, over 6390.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6937, over 6005.72 frames. ], batch size: 51, lr: 4.73e-03 +2024-08-06 18:27:38,528 INFO [trainer.py:765] (4/8) Epoch 18, batch 2100, train_loss[loss=3.405, NarTop10Accuracy=0.6472, over 3927.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6952, over 5983.71 frames. ], batch size: 4, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (4/8) Epoch 18, batch 2200, train_loss[loss=3.236, NarTop10Accuracy=0.6777, over 7302.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6938, over 6024.12 frames. ], batch size: 32, lr: 4.72e-03 +2024-08-06 18:28:06,571 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,097 INFO [trainer.py:765] (4/8) Epoch 18, batch 2300, train_loss[loss=2.902, NarTop10Accuracy=0.7464, over 5775.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.69, over 6027.29 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (4/8) Epoch 18, batch 2400, train_loss[loss=2.992, NarTop10Accuracy=0.7298, over 5250.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6961, over 5778.92 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (4/8) Epoch 18, batch 2500, train_loss[loss=2.952, NarTop10Accuracy=0.7342, over 5712.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7007, over 5470.41 frames. ], batch size: 8, lr: 4.71e-03 +2024-08-06 18:29:45,425 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 18:30:41,231 INFO [trainer.py:765] (4/8) Epoch 19, batch 100, train_loss[loss=2.9, NarTop10Accuracy=0.7513, over 7290.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6921, over 2348.62 frames. 
], batch size: 31, lr: 4.57e-03 +2024-08-06 18:31:15,602 INFO [trainer.py:765] (4/8) Epoch 19, batch 200, train_loss[loss=2.925, NarTop10Accuracy=0.743, over 6936.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6948, over 3864.35 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (4/8) Epoch 19, batch 300, train_loss[loss=3.624, NarTop10Accuracy=0.6026, over 7107.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6995, over 4659.53 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (4/8) Epoch 19, batch 400, train_loss[loss=3.135, NarTop10Accuracy=0.7054, over 5139.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6991, over 5115.54 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (4/8) Epoch 19, batch 500, train_loss[loss=3.04, NarTop10Accuracy=0.7218, over 6102.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6997, over 5376.75 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,609 INFO [trainer.py:765] (4/8) Epoch 19, batch 600, train_loss[loss=3.139, NarTop10Accuracy=0.702, over 5616.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6984, over 5639.53 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (4/8) Epoch 19, batch 700, train_loss[loss=2.843, NarTop10Accuracy=0.7609, over 4947.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6955, over 5707.22 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,178 INFO [trainer.py:765] (4/8) Epoch 19, batch 800, train_loss[loss=3.217, NarTop10Accuracy=0.6856, over 5052.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6941, over 5760.31 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:35:10,262 INFO [trainer.py:765] (4/8) Epoch 19, batch 900, train_loss[loss=2.833, NarTop10Accuracy=0.766, over 6816.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 5784.60 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (4/8) Epoch 19, batch 1000, train_loss[loss=3.341, NarTop10Accuracy=0.6549, over 6075.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5893.46 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,938 INFO [trainer.py:765] (4/8) Epoch 19, batch 1100, train_loss[loss=2.935, NarTop10Accuracy=0.7461, over 6873.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6941, over 5937.71 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (4/8) Epoch 19, batch 1200, train_loss[loss=2.996, NarTop10Accuracy=0.7321, over 7149.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6925, over 5914.13 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,314 INFO [trainer.py:765] (4/8) Epoch 19, batch 1300, train_loss[loss=2.848, NarTop10Accuracy=0.764, over 5058.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6927, over 5978.76 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,679 INFO [trainer.py:765] (4/8) Epoch 19, batch 1400, train_loss[loss=2.948, NarTop10Accuracy=0.7415, over 6204.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6922, over 6015.85 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (4/8) Epoch 19, batch 1500, train_loss[loss=3.468, NarTop10Accuracy=0.6341, over 6342.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6963, over 5952.45 frames. 
], batch size: 50, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (4/8) Epoch 19, batch 1600, train_loss[loss=3.508, NarTop10Accuracy=0.6211, over 7083.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.697, over 5937.18 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,590 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (4/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (4/8) Epoch 19, batch 1700, train_loss[loss=3.584, NarTop10Accuracy=0.5954, over 6216.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6959, over 5920.54 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (4/8) Epoch 19, batch 1800, train_loss[loss=3.492, NarTop10Accuracy=0.6226, over 7170.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.696, over 5977.92 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (4/8) Epoch 19, batch 1900, train_loss[loss=3.09, NarTop10Accuracy=0.7142, over 6000.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6971, over 6012.01 frames. ], batch size: 50, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (4/8) Epoch 19, batch 2000, train_loss[loss=3.318, NarTop10Accuracy=0.6694, over 6228.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6965, over 5973.11 frames. ], batch size: 51, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (4/8) Epoch 19, batch 2100, train_loss[loss=2.74, NarTop10Accuracy=0.7822, over 3834.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6989, over 5956.48 frames. ], batch size: 4, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (4/8) Epoch 19, batch 2200, train_loss[loss=3.201, NarTop10Accuracy=0.6864, over 7083.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6965, over 5993.65 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (4/8) Epoch 19, batch 2300, train_loss[loss=3.155, NarTop10Accuracy=0.6922, over 5673.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6916, over 6010.88 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,989 INFO [trainer.py:765] (4/8) Epoch 19, batch 2400, train_loss[loss=2.983, NarTop10Accuracy=0.7362, over 5187.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6957, over 5767.20 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (4/8) Epoch 19, batch 2500, train_loss[loss=2.936, NarTop10Accuracy=0.7433, over 5244.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7003, over 5474.71 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,728 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 18:44:22,973 INFO [trainer.py:765] (4/8) Epoch 20, batch 100, train_loss[loss=3.157, NarTop10Accuracy=0.6848, over 7143.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6944, over 2374.53 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (4/8) Epoch 20, batch 200, train_loss[loss=3.388, NarTop10Accuracy=0.6508, over 6732.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7016, over 3862.70 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,278 INFO [trainer.py:765] (4/8) Epoch 20, batch 300, train_loss[loss=3.448, NarTop10Accuracy=0.6391, over 7038.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7043, over 4657.07 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (4/8) Epoch 20, batch 400, train_loss[loss=2.815, NarTop10Accuracy=0.7658, over 5145.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7037, over 5117.48 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,769 INFO [trainer.py:765] (4/8) Epoch 20, batch 500, train_loss[loss=2.898, NarTop10Accuracy=0.7467, over 6099.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7007, over 5380.02 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (4/8) Epoch 20, batch 600, train_loss[loss=3.098, NarTop10Accuracy=0.7082, over 5730.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7008, over 5655.13 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (4/8) Epoch 20, batch 700, train_loss[loss=2.929, NarTop10Accuracy=0.7545, over 4257.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7023, over 5731.19 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:21,015 INFO [trainer.py:765] (4/8) Epoch 20, batch 800, train_loss[loss=2.876, NarTop10Accuracy=0.7537, over 4263.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6992, over 5773.71 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:56,534 INFO [trainer.py:765] (4/8) Epoch 20, batch 900, train_loss[loss=2.941, NarTop10Accuracy=0.7384, over 6141.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.7, over 5787.54 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (4/8) Epoch 20, batch 1000, train_loss[loss=3.377, NarTop10Accuracy=0.6405, over 6696.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6941, over 5874.60 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,236 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (4/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 18:50:00,874 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,427 INFO [trainer.py:765] (4/8) Epoch 20, batch 1100, train_loss[loss=3.254, NarTop10Accuracy=0.6853, over 6855.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.696, over 5928.47 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,775 INFO [trainer.py:765] (4/8) Epoch 20, batch 1200, train_loss[loss=2.978, NarTop10Accuracy=0.7352, over 7182.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6955, over 5939.76 frames. ], batch size: 31, lr: 4.29e-03 +2024-08-06 18:51:25,129 INFO [trainer.py:765] (4/8) Epoch 20, batch 1300, train_loss[loss=3.438, NarTop10Accuracy=0.6375, over 4989.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6971, over 6013.11 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,314 INFO [trainer.py:765] (4/8) Epoch 20, batch 1400, train_loss[loss=2.934, NarTop10Accuracy=0.7284, over 6201.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6991, over 6028.56 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,805 INFO [trainer.py:765] (4/8) Epoch 20, batch 1500, train_loss[loss=3.314, NarTop10Accuracy=0.6715, over 6138.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6965, over 5950.47 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (4/8) Epoch 20, batch 1600, train_loss[loss=2.902, NarTop10Accuracy=0.7565, over 7092.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6948, over 5933.29 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (4/8) Epoch 20, batch 1700, train_loss[loss=3.378, NarTop10Accuracy=0.6451, over 6147.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.697, over 5927.19 frames. ], batch size: 13, lr: 4.27e-03 +2024-08-06 18:53:53,850 INFO [trainer.py:765] (4/8) Epoch 20, batch 1800, train_loss[loss=2.919, NarTop10Accuracy=0.7358, over 7104.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6996, over 5993.80 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,315 INFO [trainer.py:765] (4/8) Epoch 20, batch 1900, train_loss[loss=3.091, NarTop10Accuracy=0.7079, over 6321.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6938, over 6024.87 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (4/8) Epoch 20, batch 2000, train_loss[loss=3.752, NarTop10Accuracy=0.5663, over 6015.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6934, over 6005.99 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (4/8) Epoch 20, batch 2100, train_loss[loss=3.485, NarTop10Accuracy=0.615, over 3921.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6939, over 5978.23 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (4/8) Epoch 20, batch 2200, train_loss[loss=2.856, NarTop10Accuracy=0.7518, over 7188.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6932, over 6012.55 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,635 INFO [trainer.py:765] (4/8) Epoch 20, batch 2300, train_loss[loss=3.383, NarTop10Accuracy=0.652, over 5721.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6911, over 6029.86 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,049 INFO [trainer.py:765] (4/8) Epoch 20, batch 2400, train_loss[loss=3.127, NarTop10Accuracy=0.6996, over 5070.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6942, over 5776.68 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,565 INFO [trainer.py:765] (4/8) Epoch 20, batch 2500, train_loss[loss=2.885, NarTop10Accuracy=0.7506, over 5163.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7017, over 5470.79 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,584 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (4/8) Epoch 21, batch 100, train_loss[loss=3.205, NarTop10Accuracy=0.6843, over 7176.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.703, over 2361.36 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,418 INFO [trainer.py:765] (4/8) Epoch 21, batch 200, train_loss[loss=2.817, NarTop10Accuracy=0.763, over 6792.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7008, over 3857.19 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,334 INFO [trainer.py:765] (4/8) Epoch 21, batch 300, train_loss[loss=2.929, NarTop10Accuracy=0.7434, over 7020.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7006, over 4649.72 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (4/8) Epoch 21, batch 400, train_loss[loss=2.841, NarTop10Accuracy=0.7571, over 5136.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7042, over 5101.63 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,840 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (4/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,891 INFO [trainer.py:765] (4/8) Epoch 21, batch 500, train_loss[loss=2.881, NarTop10Accuracy=0.7474, over 6117.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7027, over 5387.79 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,328 INFO [trainer.py:765] (4/8) Epoch 21, batch 600, train_loss[loss=3.596, NarTop10Accuracy=0.6064, over 5739.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7047, over 5638.46 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (4/8) Epoch 21, batch 700, train_loss[loss=2.804, NarTop10Accuracy=0.7592, over 4227.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7029, over 5708.79 frames. ], batch size: 5, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (4/8) Epoch 21, batch 800, train_loss[loss=2.961, NarTop10Accuracy=0.7324, over 5154.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.699, over 5767.45 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (4/8) Epoch 21, batch 900, train_loss[loss=2.934, NarTop10Accuracy=0.7363, over 6138.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6992, over 5794.50 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (4/8) Epoch 21, batch 1000, train_loss[loss=2.879, NarTop10Accuracy=0.7515, over 6780.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6994, over 5895.64 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:04:07,207 INFO [trainer.py:765] (4/8) Epoch 21, batch 1100, train_loss[loss=3.484, NarTop10Accuracy=0.6211, over 6786.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6958, over 5932.54 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,462 INFO [trainer.py:765] (4/8) Epoch 21, batch 1200, train_loss[loss=3.271, NarTop10Accuracy=0.6668, over 7185.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6994, over 5931.22 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (4/8) Epoch 21, batch 1300, train_loss[loss=2.889, NarTop10Accuracy=0.7548, over 5115.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.703, over 5998.88 frames. ], batch size: 6, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (4/8) Epoch 21, batch 1400, train_loss[loss=3.391, NarTop10Accuracy=0.6456, over 6075.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7024, over 6018.86 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,600 INFO [trainer.py:765] (4/8) Epoch 21, batch 1500, train_loss[loss=3.315, NarTop10Accuracy=0.6599, over 6108.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6994, over 5957.13 frames. 
], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,462 INFO [trainer.py:765] (4/8) Epoch 21, batch 1600, train_loss[loss=2.988, NarTop10Accuracy=0.7368, over 7119.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6989, over 5924.21 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,212 INFO [trainer.py:765] (4/8) Epoch 21, batch 1700, train_loss[loss=3.273, NarTop10Accuracy=0.6708, over 6132.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6992, over 5898.75 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (4/8) Epoch 21, batch 1800, train_loss[loss=2.732, NarTop10Accuracy=0.773, over 7023.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7003, over 5971.53 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (4/8) Epoch 21, batch 1900, train_loss[loss=3.755, NarTop10Accuracy=0.5763, over 6399.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.6979, over 6018.89 frames. ], batch size: 52, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (4/8) Epoch 21, batch 2000, train_loss[loss=3.451, NarTop10Accuracy=0.6276, over 6315.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6989, over 6001.00 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (4/8) Epoch 21, batch 2100, train_loss[loss=2.952, NarTop10Accuracy=0.7333, over 4815.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6973, over 5984.16 frames. ], batch size: 5, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (4/8) Epoch 21, batch 2200, train_loss[loss=2.959, NarTop10Accuracy=0.7458, over 7419.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6969, over 6010.87 frames. ], batch size: 33, lr: 4.04e-03 +2024-08-06 19:09:53,223 INFO [trainer.py:765] (4/8) Epoch 21, batch 2300, train_loss[loss=2.991, NarTop10Accuracy=0.7255, over 5661.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6942, over 6024.63 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,597 INFO [trainer.py:765] (4/8) Epoch 21, batch 2400, train_loss[loss=3.515, NarTop10Accuracy=0.6157, over 5055.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6971, over 5769.72 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,229 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (4/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,276 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,273 INFO [trainer.py:765] (4/8) Epoch 21, batch 2500, train_loss[loss=3.447, NarTop10Accuracy=0.6289, over 5151.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7046, over 5460.18 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:08,907 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 19:12:09,053 INFO [trainer.py:765] (4/8) Epoch 22, batch 100, train_loss[loss=2.879, NarTop10Accuracy=0.753, over 7245.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7074, over 2372.70 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (4/8) Epoch 22, batch 200, train_loss[loss=3.181, NarTop10Accuracy=0.688, over 6723.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7069, over 3860.46 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (4/8) Epoch 22, batch 300, train_loss[loss=3.021, NarTop10Accuracy=0.7251, over 7065.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7078, over 4658.66 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,228 INFO [trainer.py:765] (4/8) Epoch 22, batch 400, train_loss[loss=3.005, NarTop10Accuracy=0.7221, over 5157.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7079, over 5103.19 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (4/8) Epoch 22, batch 500, train_loss[loss=3.065, NarTop10Accuracy=0.7073, over 6096.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7072, over 5383.89 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (4/8) Epoch 22, batch 600, train_loss[loss=3.133, NarTop10Accuracy=0.7052, over 5820.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7004, over 5631.22 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (4/8) Epoch 22, batch 700, train_loss[loss=3.287, NarTop10Accuracy=0.6621, over 5043.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7011, over 5717.54 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:10,664 INFO [trainer.py:765] (4/8) Epoch 22, batch 800, train_loss[loss=3.062, NarTop10Accuracy=0.7217, over 4416.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 5796.07 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (4/8) Epoch 22, batch 900, train_loss[loss=2.986, NarTop10Accuracy=0.7223, over 6717.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7023, over 5807.46 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (4/8) Epoch 22, batch 1000, train_loss[loss=3.067, NarTop10Accuracy=0.7107, over 6681.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7035, over 5905.43 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (4/8) Epoch 22, batch 1100, train_loss[loss=2.958, NarTop10Accuracy=0.7383, over 6885.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7024, over 5940.31 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,926 INFO [trainer.py:765] (4/8) Epoch 22, batch 1200, train_loss[loss=2.923, NarTop10Accuracy=0.7381, over 7122.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7064, over 5945.16 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,252 INFO [trainer.py:765] (4/8) Epoch 22, batch 1300, train_loss[loss=3.004, NarTop10Accuracy=0.7245, over 5013.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7075, over 6001.98 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (4/8) Epoch 22, batch 1400, train_loss[loss=2.851, NarTop10Accuracy=0.7603, over 6102.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7033, over 6000.56 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (4/8) Epoch 22, batch 1500, train_loss[loss=3.557, NarTop10Accuracy=0.6203, over 6453.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.704, over 5942.42 frames. ], batch size: 52, lr: 3.88e-03 +2024-08-06 19:20:31,646 INFO [trainer.py:765] (4/8) Epoch 22, batch 1600, train_loss[loss=3.089, NarTop10Accuracy=0.7136, over 7086.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7, over 5937.22 frames. 
], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,417 INFO [trainer.py:765] (4/8) Epoch 22, batch 1700, train_loss[loss=3.179, NarTop10Accuracy=0.6955, over 6303.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7018, over 5935.38 frames. ], batch size: 13, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (4/8) Epoch 22, batch 1800, train_loss[loss=2.825, NarTop10Accuracy=0.7637, over 6930.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.703, over 5990.46 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,371 INFO [trainer.py:765] (4/8) Epoch 22, batch 1900, train_loss[loss=3.095, NarTop10Accuracy=0.707, over 5583.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6989, over 6037.43 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,109 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (4/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,818 INFO [trainer.py:765] (4/8) Epoch 22, batch 2000, train_loss[loss=3.448, NarTop10Accuracy=0.6338, over 5925.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7029, over 5999.15 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (4/8) Epoch 22, batch 2100, train_loss[loss=3.163, NarTop10Accuracy=0.6882, over 3873.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7044, over 5978.72 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 19:23:15,230 INFO [trainer.py:765] (4/8) Epoch 22, batch 2200, train_loss[loss=3.002, NarTop10Accuracy=0.7307, over 7191.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 6018.00 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,314 INFO [trainer.py:765] (4/8) Epoch 22, batch 2300, train_loss[loss=3.162, NarTop10Accuracy=0.6926, over 5694.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6995, over 6024.27 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (4/8) Epoch 22, batch 2400, train_loss[loss=3.111, NarTop10Accuracy=0.6908, over 5178.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7016, over 5772.89 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (4/8) Epoch 22, batch 2500, train_loss[loss=3.134, NarTop10Accuracy=0.6905, over 5106.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7042, over 5464.66 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,502 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (4/8) Epoch 23, batch 100, train_loss[loss=3.086, NarTop10Accuracy=0.7069, over 7146.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.6995, over 2369.78 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,308 INFO [trainer.py:765] (4/8) Epoch 23, batch 200, train_loss[loss=3.432, NarTop10Accuracy=0.6437, over 6807.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7004, over 3846.47 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,602 INFO [trainer.py:765] (4/8) Epoch 23, batch 300, train_loss[loss=3.039, NarTop10Accuracy=0.724, over 7047.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7057, over 4653.10 frames. 
], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,540 INFO [trainer.py:765] (4/8) Epoch 23, batch 400, train_loss[loss=3.333, NarTop10Accuracy=0.6584, over 5310.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7031, over 5120.45 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,712 INFO [trainer.py:765] (4/8) Epoch 23, batch 500, train_loss[loss=3.452, NarTop10Accuracy=0.6294, over 6096.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7017, over 5390.83 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (4/8) Epoch 23, batch 600, train_loss[loss=3.096, NarTop10Accuracy=0.6956, over 5733.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7037, over 5658.62 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (4/8) Epoch 23, batch 700, train_loss[loss=2.988, NarTop10Accuracy=0.7081, over 5022.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7076, over 5730.31 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,612 INFO [trainer.py:765] (4/8) Epoch 23, batch 800, train_loss[loss=2.571, NarTop10Accuracy=0.8085, over 5208.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7058, over 5781.01 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (4/8) Epoch 23, batch 900, train_loss[loss=3.338, NarTop10Accuracy=0.6499, over 6201.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.706, over 5808.74 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (4/8) Epoch 23, batch 1000, train_loss[loss=2.973, NarTop10Accuracy=0.7267, over 6081.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7078, over 5912.17 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,520 INFO [trainer.py:765] (4/8) Epoch 23, batch 1100, train_loss[loss=2.97, NarTop10Accuracy=0.7267, over 6936.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7068, over 5945.26 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,517 INFO [trainer.py:765] (4/8) Epoch 23, batch 1200, train_loss[loss=3.041, NarTop10Accuracy=0.7182, over 7545.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5942.40 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (4/8) Epoch 23, batch 1300, train_loss[loss=3.12, NarTop10Accuracy=0.7042, over 5184.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7026, over 6013.04 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,403 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (4/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,407 INFO [trainer.py:765] (4/8) Epoch 23, batch 1400, train_loss[loss=2.755, NarTop10Accuracy=0.7746, over 6051.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7024, over 6015.56 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,215 INFO [trainer.py:765] (4/8) Epoch 23, batch 1500, train_loss[loss=3.243, NarTop10Accuracy=0.68, over 6009.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7055, over 5965.01 frames. 
], batch size: 51, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (4/8) Epoch 23, batch 1600, train_loss[loss=2.921, NarTop10Accuracy=0.7523, over 7125.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7036, over 5936.58 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,782 INFO [trainer.py:765] (4/8) Epoch 23, batch 1700, train_loss[loss=3.242, NarTop10Accuracy=0.6812, over 6138.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6999, over 5906.50 frames. ], batch size: 13, lr: 3.71e-03 +2024-08-06 19:35:19,261 INFO [trainer.py:765] (4/8) Epoch 23, batch 1800, train_loss[loss=2.925, NarTop10Accuracy=0.7395, over 7185.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7014, over 5972.12 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,596 INFO [trainer.py:765] (4/8) Epoch 23, batch 1900, train_loss[loss=3.453, NarTop10Accuracy=0.6272, over 5844.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6994, over 6012.01 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,170 INFO [trainer.py:765] (4/8) Epoch 23, batch 2000, train_loss[loss=3.636, NarTop10Accuracy=0.5919, over 5745.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7032, over 6002.16 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,517 INFO [trainer.py:765] (4/8) Epoch 23, batch 2100, train_loss[loss=3.155, NarTop10Accuracy=0.6875, over 4869.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7026, over 5981.68 frames. ], batch size: 5, lr: 3.69e-03 +2024-08-06 19:37:01,908 INFO [trainer.py:765] (4/8) Epoch 23, batch 2200, train_loss[loss=3.105, NarTop10Accuracy=0.6991, over 7353.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7003, over 6018.75 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,060 INFO [trainer.py:765] (4/8) Epoch 23, batch 2300, train_loss[loss=3.082, NarTop10Accuracy=0.7067, over 5706.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7015, over 6030.69 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,424 INFO [trainer.py:765] (4/8) Epoch 23, batch 2400, train_loss[loss=3.006, NarTop10Accuracy=0.7267, over 5187.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7011, over 5774.99 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,052 INFO [trainer.py:765] (4/8) Epoch 23, batch 2500, train_loss[loss=3.241, NarTop10Accuracy=0.6763, over 5367.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7048, over 5472.77 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,167 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 19:39:37,632 INFO [trainer.py:765] (4/8) Epoch 24, batch 100, train_loss[loss=3.519, NarTop10Accuracy=0.6201, over 7362.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7021, over 2367.63 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,190 INFO [trainer.py:765] (4/8) Epoch 24, batch 200, train_loss[loss=2.892, NarTop10Accuracy=0.7499, over 6792.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7051, over 3854.13 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,556 INFO [trainer.py:765] (4/8) Epoch 24, batch 300, train_loss[loss=2.902, NarTop10Accuracy=0.7481, over 6957.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7071, over 4654.29 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,234 INFO [trainer.py:765] (4/8) Epoch 24, batch 400, train_loss[loss=2.896, NarTop10Accuracy=0.7516, over 5121.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7067, over 5102.44 frames. 
], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,323 INFO [trainer.py:765] (4/8) Epoch 24, batch 500, train_loss[loss=2.859, NarTop10Accuracy=0.7547, over 6012.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7089, over 5366.43 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,453 INFO [trainer.py:765] (4/8) Epoch 24, batch 600, train_loss[loss=2.767, NarTop10Accuracy=0.7793, over 5820.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7099, over 5638.91 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (4/8) Epoch 24, batch 700, train_loss[loss=2.938, NarTop10Accuracy=0.7404, over 5082.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7096, over 5733.59 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,381 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (4/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:43:28,563 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,815 INFO [trainer.py:765] (4/8) Epoch 24, batch 800, train_loss[loss=2.692, NarTop10Accuracy=0.7914, over 4347.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 5790.68 frames. ], batch size: 5, lr: 3.58e-03 +2024-08-06 19:44:11,410 INFO [trainer.py:765] (4/8) Epoch 24, batch 900, train_loss[loss=2.895, NarTop10Accuracy=0.7525, over 6243.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 5809.37 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,490 INFO [trainer.py:765] (4/8) Epoch 24, batch 1000, train_loss[loss=3.213, NarTop10Accuracy=0.6809, over 6690.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7064, over 5916.91 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:45:27,108 INFO [trainer.py:765] (4/8) Epoch 24, batch 1100, train_loss[loss=3.54, NarTop10Accuracy=0.6127, over 6744.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7039, over 5931.46 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,438 INFO [trainer.py:765] (4/8) Epoch 24, batch 1200, train_loss[loss=2.998, NarTop10Accuracy=0.7293, over 7824.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7053, over 5923.96 frames. ], batch size: 33, lr: 3.57e-03 +2024-08-06 19:46:30,295 INFO [trainer.py:765] (4/8) Epoch 24, batch 1300, train_loss[loss=3.398, NarTop10Accuracy=0.6415, over 5130.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7065, over 5999.08 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,860 INFO [trainer.py:765] (4/8) Epoch 24, batch 1400, train_loss[loss=3.297, NarTop10Accuracy=0.6608, over 6051.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7041, over 6022.05 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,957 INFO [trainer.py:765] (4/8) Epoch 24, batch 1500, train_loss[loss=3.294, NarTop10Accuracy=0.6601, over 5769.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7015, over 5955.75 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (4/8) Epoch 24, batch 1600, train_loss[loss=3.378, NarTop10Accuracy=0.648, over 7017.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7009, over 5931.66 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,269 INFO [trainer.py:765] (4/8) Epoch 24, batch 1700, train_loss[loss=2.978, NarTop10Accuracy=0.7344, over 6357.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7011, over 5920.12 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 19:49:01,638 INFO [trainer.py:765] (4/8) Epoch 24, batch 1800, train_loss[loss=2.837, NarTop10Accuracy=0.759, over 7002.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6999, over 5983.59 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,043 INFO [trainer.py:765] (4/8) Epoch 24, batch 1900, train_loss[loss=3.542, NarTop10Accuracy=0.6165, over 6057.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.6979, over 6037.89 frames. ], batch size: 51, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (4/8) Epoch 24, batch 2000, train_loss[loss=3.427, NarTop10Accuracy=0.6304, over 6171.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7031, over 6004.46 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 19:50:18,820 INFO [trainer.py:765] (4/8) Epoch 24, batch 2100, train_loss[loss=2.915, NarTop10Accuracy=0.7425, over 3894.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7036, over 5986.14 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (4/8) Epoch 24, batch 2200, train_loss[loss=3.525, NarTop10Accuracy=0.612, over 7332.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7031, over 6011.51 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (4/8) Epoch 24, batch 2300, train_loss[loss=2.893, NarTop10Accuracy=0.7436, over 5874.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7041, over 6017.27 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,349 INFO [trainer.py:765] (4/8) Epoch 24, batch 2400, train_loss[loss=3.091, NarTop10Accuracy=0.6994, over 5121.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 5784.23 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (4/8) Epoch 24, batch 2500, train_loss[loss=2.914, NarTop10Accuracy=0.7364, over 5202.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7108, over 5496.14 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:16,696 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 19:53:22,198 INFO [trainer.py:765] (4/8) Epoch 25, batch 100, train_loss[loss=3.347, NarTop10Accuracy=0.6591, over 7377.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 2373.68 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,262 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (4/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,177 INFO [trainer.py:765] (4/8) Epoch 25, batch 200, train_loss[loss=2.975, NarTop10Accuracy=0.7315, over 6816.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7071, over 3865.82 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,647 INFO [trainer.py:765] (4/8) Epoch 25, batch 300, train_loss[loss=3.185, NarTop10Accuracy=0.6857, over 7179.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 4666.15 frames. 
], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (4/8) Epoch 25, batch 400, train_loss[loss=3.079, NarTop10Accuracy=0.706, over 5007.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7083, over 5120.05 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,738 INFO [trainer.py:765] (4/8) Epoch 25, batch 500, train_loss[loss=2.956, NarTop10Accuracy=0.7355, over 6123.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5397.54 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,815 INFO [trainer.py:765] (4/8) Epoch 25, batch 600, train_loss[loss=2.685, NarTop10Accuracy=0.7863, over 5712.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7095, over 5656.11 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (4/8) Epoch 25, batch 700, train_loss[loss=2.768, NarTop10Accuracy=0.7731, over 5088.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7106, over 5723.73 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,136 INFO [trainer.py:765] (4/8) Epoch 25, batch 800, train_loss[loss=2.886, NarTop10Accuracy=0.7409, over 5151.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7096, over 5782.17 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,679 INFO [trainer.py:765] (4/8) Epoch 25, batch 900, train_loss[loss=3.147, NarTop10Accuracy=0.6905, over 6627.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 5805.03 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:58:37,639 INFO [trainer.py:765] (4/8) Epoch 25, batch 1000, train_loss[loss=2.779, NarTop10Accuracy=0.7712, over 6306.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7084, over 5909.80 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:59:14,855 INFO [trainer.py:765] (4/8) Epoch 25, batch 1100, train_loss[loss=3.354, NarTop10Accuracy=0.6544, over 6879.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7069, over 5925.86 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,039 INFO [trainer.py:765] (4/8) Epoch 25, batch 1200, train_loss[loss=3.25, NarTop10Accuracy=0.6747, over 7533.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.707, over 5919.76 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (4/8) Epoch 25, batch 1300, train_loss[loss=2.775, NarTop10Accuracy=0.773, over 5223.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 5990.98 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,016 INFO [trainer.py:765] (4/8) Epoch 25, batch 1400, train_loss[loss=2.676, NarTop10Accuracy=0.7903, over 6057.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 6011.13 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (4/8) Epoch 25, batch 1500, train_loss[loss=3.161, NarTop10Accuracy=0.6859, over 5940.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7082, over 5944.63 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,625 INFO [trainer.py:765] (4/8) Epoch 25, batch 1600, train_loss[loss=2.874, NarTop10Accuracy=0.7532, over 7167.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 5930.19 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,360 INFO [trainer.py:765] (4/8) Epoch 25, batch 1700, train_loss[loss=3.105, NarTop10Accuracy=0.7152, over 6549.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7081, over 5917.43 frames. 
], batch size: 14, lr: 3.41e-03 +2024-08-06 20:02:53,853 INFO [trainer.py:765] (4/8) Epoch 25, batch 1800, train_loss[loss=3.393, NarTop10Accuracy=0.6539, over 7029.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7062, over 5993.40 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,340 INFO [trainer.py:765] (4/8) Epoch 25, batch 1900, train_loss[loss=3.151, NarTop10Accuracy=0.7035, over 6246.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7037, over 6018.07 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,933 INFO [trainer.py:765] (4/8) Epoch 25, batch 2000, train_loss[loss=3.524, NarTop10Accuracy=0.622, over 5985.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7011, over 5982.15 frames. ], batch size: 51, lr: 3.40e-03 +2024-08-06 20:04:11,246 INFO [trainer.py:765] (4/8) Epoch 25, batch 2100, train_loss[loss=2.774, NarTop10Accuracy=0.7727, over 4050.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7031, over 5976.10 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,409 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (4/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 28075MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,512 INFO [trainer.py:765] (4/8) Epoch 25, batch 2200, train_loss[loss=3.22, NarTop10Accuracy=0.6776, over 7413.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7027, over 6022.23 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,644 INFO [trainer.py:765] (4/8) Epoch 25, batch 2300, train_loss[loss=3.073, NarTop10Accuracy=0.7046, over 5772.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7026, over 6021.93 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,140 INFO [trainer.py:765] (4/8) Epoch 25, batch 2400, train_loss[loss=2.839, NarTop10Accuracy=0.7637, over 5358.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7067, over 5787.24 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,845 INFO [trainer.py:765] (4/8) Epoch 25, batch 2500, train_loss[loss=2.69, NarTop10Accuracy=0.7922, over 5256.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7114, over 5489.24 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:17,423 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 20:07:19,304 INFO [trainer.py:765] (4/8) Epoch 26, batch 100, train_loss[loss=3.09, NarTop10Accuracy=0.7109, over 7302.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7088, over 2359.91 frames. ], batch size: 31, lr: 3.32e-03 +2024-08-06 20:07:52,382 INFO [trainer.py:765] (4/8) Epoch 26, batch 200, train_loss[loss=2.735, NarTop10Accuracy=0.7737, over 6768.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7094, over 3859.16 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,733 INFO [trainer.py:765] (4/8) Epoch 26, batch 300, train_loss[loss=3.035, NarTop10Accuracy=0.7168, over 7152.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.709, over 4674.21 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (4/8) Epoch 26, batch 400, train_loss[loss=2.993, NarTop10Accuracy=0.735, over 5307.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7092, over 5100.65 frames. 
], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,147 INFO [trainer.py:765] (4/8) Epoch 26, batch 500, train_loss[loss=2.798, NarTop10Accuracy=0.7736, over 6075.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7072, over 5382.86 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,890 INFO [trainer.py:765] (4/8) Epoch 26, batch 600, train_loss[loss=2.816, NarTop10Accuracy=0.7683, over 5694.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7121, over 5650.30 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,872 INFO [trainer.py:765] (4/8) Epoch 26, batch 700, train_loss[loss=3.35, NarTop10Accuracy=0.656, over 5016.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.708, over 5737.27 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:19,061 INFO [trainer.py:765] (4/8) Epoch 26, batch 800, train_loss[loss=2.856, NarTop10Accuracy=0.7507, over 5121.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 5785.32 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,315 INFO [trainer.py:765] (4/8) Epoch 26, batch 900, train_loss[loss=2.847, NarTop10Accuracy=0.7529, over 6621.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.709, over 5792.43 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:12:25,973 INFO [trainer.py:765] (4/8) Epoch 26, batch 1000, train_loss[loss=2.912, NarTop10Accuracy=0.7513, over 6114.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7068, over 5898.38 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:13:06,377 INFO [trainer.py:765] (4/8) Epoch 26, batch 1100, train_loss[loss=3.354, NarTop10Accuracy=0.6608, over 6750.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7053, over 5941.19 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,536 INFO [trainer.py:765] (4/8) Epoch 26, batch 1200, train_loss[loss=3.23, NarTop10Accuracy=0.6689, over 7224.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7086, over 5944.88 frames. ], batch size: 31, lr: 3.29e-03 +2024-08-06 20:14:13,696 INFO [trainer.py:765] (4/8) Epoch 26, batch 1300, train_loss[loss=2.864, NarTop10Accuracy=0.7516, over 5178.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 6019.74 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (4/8) Epoch 26, batch 1400, train_loss[loss=2.788, NarTop10Accuracy=0.7678, over 6054.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 6029.11 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,155 INFO [trainer.py:765] (4/8) Epoch 26, batch 1500, train_loss[loss=3.083, NarTop10Accuracy=0.7145, over 6162.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.709, over 5971.23 frames. ], batch size: 50, lr: 3.28e-03 +2024-08-06 20:15:48,979 INFO [trainer.py:765] (4/8) Epoch 26, batch 1600, train_loss[loss=3.027, NarTop10Accuracy=0.722, over 6963.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7103, over 5956.04 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,002 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (4/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,239 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 20:15:58,778 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,951 INFO [trainer.py:765] (4/8) Epoch 26, batch 1700, train_loss[loss=3.215, NarTop10Accuracy=0.6768, over 6105.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7124, over 5950.76 frames. ], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,426 INFO [trainer.py:765] (4/8) Epoch 26, batch 1800, train_loss[loss=2.797, NarTop10Accuracy=0.769, over 7122.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 6006.22 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,840 INFO [trainer.py:765] (4/8) Epoch 26, batch 1900, train_loss[loss=2.969, NarTop10Accuracy=0.7331, over 6306.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7081, over 6030.76 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,379 INFO [trainer.py:765] (4/8) Epoch 26, batch 2000, train_loss[loss=3.584, NarTop10Accuracy=0.6109, over 5538.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7065, over 5992.64 frames. ], batch size: 51, lr: 3.27e-03 +2024-08-06 20:18:07,562 INFO [trainer.py:765] (4/8) Epoch 26, batch 2100, train_loss[loss=3.006, NarTop10Accuracy=0.7165, over 3972.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7062, over 5966.98 frames. ], batch size: 4, lr: 3.27e-03 +2024-08-06 20:18:32,776 INFO [trainer.py:765] (4/8) Epoch 26, batch 2200, train_loss[loss=2.863, NarTop10Accuracy=0.7521, over 7107.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.707, over 6004.37 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,897 INFO [trainer.py:765] (4/8) Epoch 26, batch 2300, train_loss[loss=3.147, NarTop10Accuracy=0.6805, over 5625.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.706, over 6024.40 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,205 INFO [trainer.py:765] (4/8) Epoch 26, batch 2400, train_loss[loss=2.786, NarTop10Accuracy=0.7699, over 5187.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 5774.19 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,651 INFO [trainer.py:765] (4/8) Epoch 26, batch 2500, train_loss[loss=2.736, NarTop10Accuracy=0.7783, over 5256.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5478.16 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:05,796 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 20:21:04,874 INFO [trainer.py:765] (4/8) Epoch 27, batch 100, train_loss[loss=3.222, NarTop10Accuracy=0.6768, over 7059.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7115, over 2361.67 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (4/8) Epoch 27, batch 200, train_loss[loss=2.689, NarTop10Accuracy=0.7887, over 6657.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 3855.36 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,049 INFO [trainer.py:765] (4/8) Epoch 27, batch 300, train_loss[loss=2.907, NarTop10Accuracy=0.7549, over 7101.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7102, over 4665.99 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (4/8) Epoch 27, batch 400, train_loss[loss=2.713, NarTop10Accuracy=0.7812, over 5238.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7138, over 5114.69 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (4/8) Epoch 27, batch 500, train_loss[loss=2.769, NarTop10Accuracy=0.7797, over 6015.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 5385.67 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (4/8) Epoch 27, batch 600, train_loss[loss=3.099, NarTop10Accuracy=0.6934, over 5871.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 5651.35 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,975 INFO [trainer.py:765] (4/8) Epoch 27, batch 700, train_loss[loss=2.745, NarTop10Accuracy=0.7885, over 5085.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7166, over 5712.54 frames. ], batch size: 6, lr: 3.18e-03 +2024-08-06 20:25:03,407 INFO [trainer.py:765] (4/8) Epoch 27, batch 800, train_loss[loss=3.283, NarTop10Accuracy=0.6593, over 5160.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 5771.59 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (4/8) Epoch 27, batch 900, train_loss[loss=3.197, NarTop10Accuracy=0.6875, over 6654.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7109, over 5808.87 frames. ], batch size: 14, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (4/8) Epoch 27, batch 1000, train_loss[loss=2.853, NarTop10Accuracy=0.7598, over 6078.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5912.63 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,314 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (4/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (4/8) Epoch 27, batch 1100, train_loss[loss=2.958, NarTop10Accuracy=0.7336, over 6738.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5943.89 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (4/8) Epoch 27, batch 1200, train_loss[loss=2.927, NarTop10Accuracy=0.7482, over 6921.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7119, over 5933.88 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (4/8) Epoch 27, batch 1300, train_loss[loss=2.709, NarTop10Accuracy=0.7849, over 4986.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7132, over 6001.94 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (4/8) Epoch 27, batch 1400, train_loss[loss=3.367, NarTop10Accuracy=0.653, over 6060.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 6030.68 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (4/8) Epoch 27, batch 1500, train_loss[loss=2.96, NarTop10Accuracy=0.7308, over 5748.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7104, over 5970.03 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (4/8) Epoch 27, batch 1600, train_loss[loss=2.976, NarTop10Accuracy=0.7348, over 7134.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5940.84 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (4/8) Epoch 27, batch 1700, train_loss[loss=3.079, NarTop10Accuracy=0.7087, over 6204.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7099, over 5911.56 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (4/8) Epoch 27, batch 1800, train_loss[loss=3.504, NarTop10Accuracy=0.6256, over 6912.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 5973.77 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (4/8) Epoch 27, batch 1900, train_loss[loss=3.085, NarTop10Accuracy=0.7188, over 6060.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7083, over 6014.56 frames. ], batch size: 51, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (4/8) Epoch 27, batch 2000, train_loss[loss=3.081, NarTop10Accuracy=0.7073, over 6183.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7098, over 5994.65 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,659 INFO [trainer.py:765] (4/8) Epoch 27, batch 2100, train_loss[loss=2.721, NarTop10Accuracy=0.7772, over 3939.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7097, over 5963.82 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (4/8) Epoch 27, batch 2200, train_loss[loss=3.447, NarTop10Accuracy=0.6339, over 7152.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.708, over 6013.00 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 20:32:32,941 INFO [trainer.py:765] (4/8) Epoch 27, batch 2300, train_loss[loss=2.782, NarTop10Accuracy=0.7626, over 5748.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7086, over 6028.75 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (4/8) Epoch 27, batch 2400, train_loss[loss=2.725, NarTop10Accuracy=0.7866, over 5109.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7081, over 5760.84 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (4/8) Epoch 27, batch 2500, train_loss[loss=3.552, NarTop10Accuracy=0.6141, over 5055.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7146, over 5460.59 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,897 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 20:34:35,828 INFO [trainer.py:765] (4/8) Epoch 28, batch 100, train_loss[loss=3.021, NarTop10Accuracy=0.7284, over 7467.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7089, over 2360.22 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (4/8) Epoch 28, batch 200, train_loss[loss=2.752, NarTop10Accuracy=0.7733, over 6726.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7077, over 3870.05 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,421 INFO [trainer.py:765] (4/8) Epoch 28, batch 300, train_loss[loss=2.992, NarTop10Accuracy=0.7186, over 7185.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7092, over 4676.21 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,864 INFO [trainer.py:765] (4/8) Epoch 28, batch 400, train_loss[loss=3.32, NarTop10Accuracy=0.6632, over 5304.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7075, over 5131.27 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,406 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (4/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,664 INFO [trainer.py:765] (4/8) Epoch 28, batch 500, train_loss[loss=3.235, NarTop10Accuracy=0.6836, over 6018.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7106, over 5400.81 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,463 INFO [trainer.py:765] (4/8) Epoch 28, batch 600, train_loss[loss=3.2, NarTop10Accuracy=0.6826, over 5604.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7103, over 5665.52 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,891 INFO [trainer.py:765] (4/8) Epoch 28, batch 700, train_loss[loss=3.189, NarTop10Accuracy=0.6837, over 4359.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 5731.66 frames. ], batch size: 5, lr: 3.06e-03 +2024-08-06 20:38:42,489 INFO [trainer.py:765] (4/8) Epoch 28, batch 800, train_loss[loss=2.88, NarTop10Accuracy=0.7597, over 5121.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7131, over 5796.08 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,507 INFO [trainer.py:765] (4/8) Epoch 28, batch 900, train_loss[loss=3.314, NarTop10Accuracy=0.6669, over 6177.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 5812.40 frames. ], batch size: 13, lr: 3.06e-03 +2024-08-06 20:39:53,241 INFO [trainer.py:765] (4/8) Epoch 28, batch 1000, train_loss[loss=3.357, NarTop10Accuracy=0.6525, over 6138.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.714, over 5915.67 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 20:40:25,868 INFO [trainer.py:765] (4/8) Epoch 28, batch 1100, train_loss[loss=2.728, NarTop10Accuracy=0.7824, over 6891.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7113, over 5948.49 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,419 INFO [trainer.py:765] (4/8) Epoch 28, batch 1200, train_loss[loss=3.253, NarTop10Accuracy=0.6707, over 7251.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7096, over 5915.87 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,681 INFO [trainer.py:765] (4/8) Epoch 28, batch 1300, train_loss[loss=3.048, NarTop10Accuracy=0.7133, over 5106.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 5977.92 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 20:42:13,048 INFO [trainer.py:765] (4/8) Epoch 28, batch 1400, train_loss[loss=2.944, NarTop10Accuracy=0.7333, over 6069.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7078, over 6001.98 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (4/8) Epoch 28, batch 1500, train_loss[loss=3.512, NarTop10Accuracy=0.6283, over 6204.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7112, over 5943.45 frames. ], batch size: 52, lr: 3.04e-03 +2024-08-06 20:43:11,081 INFO [trainer.py:765] (4/8) Epoch 28, batch 1600, train_loss[loss=2.91, NarTop10Accuracy=0.7533, over 7191.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7091, over 5907.85 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:43:37,786 INFO [trainer.py:765] (4/8) Epoch 28, batch 1700, train_loss[loss=3.05, NarTop10Accuracy=0.7236, over 6315.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5899.35 frames. 
], batch size: 13, lr: 3.04e-03 +2024-08-06 20:44:04,326 INFO [trainer.py:765] (4/8) Epoch 28, batch 1800, train_loss[loss=3.049, NarTop10Accuracy=0.7114, over 7155.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7094, over 5964.19 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,757 INFO [trainer.py:765] (4/8) Epoch 28, batch 1900, train_loss[loss=3.135, NarTop10Accuracy=0.6959, over 5802.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7096, over 6032.14 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,329 INFO [trainer.py:765] (4/8) Epoch 28, batch 2000, train_loss[loss=3.035, NarTop10Accuracy=0.7242, over 6129.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 6003.81 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:45:21,651 INFO [trainer.py:765] (4/8) Epoch 28, batch 2100, train_loss[loss=2.859, NarTop10Accuracy=0.7544, over 4896.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7145, over 5974.99 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 20:45:47,077 INFO [trainer.py:765] (4/8) Epoch 28, batch 2200, train_loss[loss=2.925, NarTop10Accuracy=0.7513, over 7329.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7117, over 6010.66 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,308 INFO [trainer.py:765] (4/8) Epoch 28, batch 2300, train_loss[loss=3.128, NarTop10Accuracy=0.6931, over 5805.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 6017.37 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,808 INFO [trainer.py:765] (4/8) Epoch 28, batch 2400, train_loss[loss=2.916, NarTop10Accuracy=0.7418, over 5127.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 5762.47 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (4/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (4/8) Epoch 28, batch 2500, train_loss[loss=2.93, NarTop10Accuracy=0.7366, over 5019.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5459.01 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,264 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 20:48:21,053 INFO [trainer.py:765] (4/8) Epoch 29, batch 100, train_loss[loss=2.978, NarTop10Accuracy=0.7259, over 7143.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7103, over 2369.30 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,406 INFO [trainer.py:765] (4/8) Epoch 29, batch 200, train_loss[loss=3.337, NarTop10Accuracy=0.6571, over 6753.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7155, over 3863.72 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,477 INFO [trainer.py:765] (4/8) Epoch 29, batch 300, train_loss[loss=3.042, NarTop10Accuracy=0.7184, over 7020.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7173, over 4686.46 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,053 INFO [trainer.py:765] (4/8) Epoch 29, batch 400, train_loss[loss=3.303, NarTop10Accuracy=0.6631, over 5259.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7128, over 5133.87 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,436 INFO [trainer.py:765] (4/8) Epoch 29, batch 500, train_loss[loss=3.265, NarTop10Accuracy=0.6727, over 5988.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7156, over 5376.77 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,025 INFO [trainer.py:765] (4/8) Epoch 29, batch 600, train_loss[loss=2.728, NarTop10Accuracy=0.7852, over 5706.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7154, over 5636.78 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,678 INFO [trainer.py:765] (4/8) Epoch 29, batch 700, train_loss[loss=2.843, NarTop10Accuracy=0.77, over 4968.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7104, over 5726.08 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,725 INFO [trainer.py:765] (4/8) Epoch 29, batch 800, train_loss[loss=2.744, NarTop10Accuracy=0.789, over 5142.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7127, over 5804.02 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,743 INFO [trainer.py:765] (4/8) Epoch 29, batch 900, train_loss[loss=2.754, NarTop10Accuracy=0.7662, over 6231.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7113, over 5809.21 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:16,862 INFO [trainer.py:765] (4/8) Epoch 29, batch 1000, train_loss[loss=3.333, NarTop10Accuracy=0.6516, over 6189.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 5906.77 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:52,903 INFO [trainer.py:765] (4/8) Epoch 29, batch 1100, train_loss[loss=3.202, NarTop10Accuracy=0.6857, over 6780.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5923.47 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,691 INFO [trainer.py:765] (4/8) Epoch 29, batch 1200, train_loss[loss=3.103, NarTop10Accuracy=0.7101, over 7014.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7104, over 5905.59 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,429 INFO [trainer.py:765] (4/8) Epoch 29, batch 1300, train_loss[loss=2.748, NarTop10Accuracy=0.7694, over 4884.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 5981.01 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (4/8) Epoch 29, batch 1400, train_loss[loss=3.263, NarTop10Accuracy=0.6711, over 6057.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 6007.72 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,359 INFO [trainer.py:765] (4/8) Epoch 29, batch 1500, train_loss[loss=3.322, NarTop10Accuracy=0.6586, over 6621.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 5970.65 frames. ], batch size: 50, lr: 2.94e-03 +2024-08-06 20:56:32,042 INFO [trainer.py:765] (4/8) Epoch 29, batch 1600, train_loss[loss=3.377, NarTop10Accuracy=0.6445, over 7233.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7092, over 5957.40 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,640 INFO [trainer.py:765] (4/8) Epoch 29, batch 1700, train_loss[loss=2.787, NarTop10Accuracy=0.7714, over 6315.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7098, over 5944.53 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 20:57:25,001 INFO [trainer.py:765] (4/8) Epoch 29, batch 1800, train_loss[loss=3.111, NarTop10Accuracy=0.7049, over 7134.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7115, over 5989.54 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,622 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (4/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (4/8) Epoch 29, batch 1900, train_loss[loss=3.002, NarTop10Accuracy=0.7232, over 5916.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 6027.08 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:25,308 INFO [trainer.py:765] (4/8) Epoch 29, batch 2000, train_loss[loss=3.553, NarTop10Accuracy=0.6196, over 6162.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 6007.44 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:50,629 INFO [trainer.py:765] (4/8) Epoch 29, batch 2100, train_loss[loss=2.972, NarTop10Accuracy=0.7445, over 3918.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7072, over 5978.34 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 20:59:15,805 INFO [trainer.py:765] (4/8) Epoch 29, batch 2200, train_loss[loss=2.912, NarTop10Accuracy=0.7416, over 7332.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.71, over 6024.18 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (4/8) Epoch 29, batch 2300, train_loss[loss=2.892, NarTop10Accuracy=0.7432, over 5685.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7076, over 6031.24 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,155 INFO [trainer.py:765] (4/8) Epoch 29, batch 2400, train_loss[loss=2.771, NarTop10Accuracy=0.7738, over 5142.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7103, over 5780.43 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (4/8) Epoch 29, batch 2500, train_loss[loss=3.455, NarTop10Accuracy=0.6368, over 4998.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5465.58 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,843 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 21:01:41,716 INFO [trainer.py:765] (4/8) Epoch 30, batch 100, train_loss[loss=2.94, NarTop10Accuracy=0.7407, over 7221.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7192, over 2374.84 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,013 INFO [trainer.py:765] (4/8) Epoch 30, batch 200, train_loss[loss=2.974, NarTop10Accuracy=0.729, over 7116.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7206, over 3856.93 frames. ], batch size: 18, lr: 2.86e-03 +2024-08-06 21:02:51,342 INFO [trainer.py:765] (4/8) Epoch 30, batch 300, train_loss[loss=2.897, NarTop10Accuracy=0.7431, over 7185.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7236, over 4649.91 frames. ], batch size: 23, lr: 2.86e-03 +2024-08-06 21:03:21,642 INFO [trainer.py:765] (4/8) Epoch 30, batch 400, train_loss[loss=2.724, NarTop10Accuracy=0.7788, over 4983.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7199, over 5098.70 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,545 INFO [trainer.py:765] (4/8) Epoch 30, batch 500, train_loss[loss=3.384, NarTop10Accuracy=0.6466, over 6057.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5386.76 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,655 INFO [trainer.py:765] (4/8) Epoch 30, batch 600, train_loss[loss=2.926, NarTop10Accuracy=0.7388, over 5784.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7176, over 5655.39 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,525 INFO [trainer.py:765] (4/8) Epoch 30, batch 700, train_loss[loss=3.064, NarTop10Accuracy=0.7164, over 4971.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7206, over 5712.17 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,131 INFO [trainer.py:765] (4/8) Epoch 30, batch 800, train_loss[loss=2.996, NarTop10Accuracy=0.73, over 4377.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7205, over 5777.87 frames. ], batch size: 5, lr: 2.85e-03 +2024-08-06 21:06:14,843 INFO [trainer.py:765] (4/8) Epoch 30, batch 900, train_loss[loss=2.794, NarTop10Accuracy=0.7623, over 6147.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 5793.63 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,951 INFO [trainer.py:765] (4/8) Epoch 30, batch 1000, train_loss[loss=2.926, NarTop10Accuracy=0.7458, over 6726.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7122, over 5898.88 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:07:25,936 INFO [trainer.py:765] (4/8) Epoch 30, batch 1100, train_loss[loss=3.312, NarTop10Accuracy=0.6665, over 7044.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7098, over 5929.50 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,380 INFO [trainer.py:765] (4/8) Epoch 30, batch 1200, train_loss[loss=2.955, NarTop10Accuracy=0.7347, over 7407.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 5924.34 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,370 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (4/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,202 INFO [trainer.py:765] (4/8) Epoch 30, batch 1300, train_loss[loss=2.951, NarTop10Accuracy=0.725, over 4974.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7125, over 5999.01 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 21:09:22,396 INFO [trainer.py:765] (4/8) Epoch 30, batch 1400, train_loss[loss=2.774, NarTop10Accuracy=0.7655, over 6126.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7123, over 6038.06 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,373 INFO [trainer.py:765] (4/8) Epoch 30, batch 1500, train_loss[loss=2.989, NarTop10Accuracy=0.7303, over 6330.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7131, over 5966.95 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (4/8) Epoch 30, batch 1600, train_loss[loss=3.067, NarTop10Accuracy=0.7135, over 7056.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7118, over 5930.81 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,678 INFO [trainer.py:765] (4/8) Epoch 30, batch 1700, train_loss[loss=3.107, NarTop10Accuracy=0.6914, over 6276.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.71, over 5916.21 frames. 
], batch size: 13, lr: 2.83e-03 +2024-08-06 21:11:13,057 INFO [trainer.py:765] (4/8) Epoch 30, batch 1800, train_loss[loss=3.424, NarTop10Accuracy=0.636, over 7260.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7102, over 5973.96 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,417 INFO [trainer.py:765] (4/8) Epoch 30, batch 1900, train_loss[loss=3.016, NarTop10Accuracy=0.7295, over 6252.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7093, over 6012.22 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,825 INFO [trainer.py:765] (4/8) Epoch 30, batch 2000, train_loss[loss=3.348, NarTop10Accuracy=0.6578, over 5970.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7123, over 5975.72 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:30,087 INFO [trainer.py:765] (4/8) Epoch 30, batch 2100, train_loss[loss=2.78, NarTop10Accuracy=0.7686, over 3852.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 5954.97 frames. ], batch size: 4, lr: 2.83e-03 +2024-08-06 21:12:55,224 INFO [trainer.py:765] (4/8) Epoch 30, batch 2200, train_loss[loss=3.067, NarTop10Accuracy=0.7186, over 7128.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7111, over 5995.09 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,296 INFO [trainer.py:765] (4/8) Epoch 30, batch 2300, train_loss[loss=2.869, NarTop10Accuracy=0.7583, over 5562.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7078, over 5986.58 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,490 INFO [trainer.py:765] (4/8) Epoch 30, batch 2400, train_loss[loss=2.769, NarTop10Accuracy=0.7661, over 5865.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7151, over 5763.64 frames. ], batch size: 8, lr: 2.82e-03 +2024-08-06 21:14:07,986 INFO [trainer.py:765] (4/8) Epoch 30, batch 2500, train_loss[loss=2.984, NarTop10Accuracy=0.7213, over 5145.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7155, over 5475.79 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:28,015 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (4/8) Epoch 31, batch 100, train_loss[loss=3.491, NarTop10Accuracy=0.6241, over 7152.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7121, over 2365.48 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,127 INFO [trainer.py:765] (4/8) Epoch 31, batch 200, train_loss[loss=2.973, NarTop10Accuracy=0.7293, over 6642.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 3856.68 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,216 INFO [trainer.py:765] (4/8) Epoch 31, batch 300, train_loss[loss=2.912, NarTop10Accuracy=0.7535, over 7083.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7197, over 4667.27 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (4/8) Epoch 31, batch 400, train_loss[loss=3.13, NarTop10Accuracy=0.697, over 5196.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7158, over 5106.83 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,724 INFO [trainer.py:765] (4/8) Epoch 31, batch 500, train_loss[loss=2.773, NarTop10Accuracy=0.7735, over 6108.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5390.36 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (4/8) Epoch 31, batch 600, train_loss[loss=2.831, NarTop10Accuracy=0.7624, over 5772.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7135, over 5665.82 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,610 INFO [trainer.py:765] (4/8) Epoch 31, batch 700, train_loss[loss=3.355, NarTop10Accuracy=0.6475, over 5106.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7131, over 5725.36 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,096 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (4/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,277 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,246 INFO [trainer.py:765] (4/8) Epoch 31, batch 800, train_loss[loss=2.767, NarTop10Accuracy=0.7755, over 5766.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.715, over 5782.59 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:19:56,951 INFO [trainer.py:765] (4/8) Epoch 31, batch 900, train_loss[loss=3.376, NarTop10Accuracy=0.6357, over 6222.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7152, over 5793.97 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,311 INFO [trainer.py:765] (4/8) Epoch 31, batch 1000, train_loss[loss=3.416, NarTop10Accuracy=0.6416, over 6252.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7158, over 5895.82 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,216 INFO [trainer.py:765] (4/8) Epoch 31, batch 1100, train_loss[loss=3.252, NarTop10Accuracy=0.6785, over 6831.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 5927.55 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,120 INFO [trainer.py:765] (4/8) Epoch 31, batch 1200, train_loss[loss=2.84, NarTop10Accuracy=0.7537, over 7236.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7179, over 5925.90 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,742 INFO [trainer.py:765] (4/8) Epoch 31, batch 1300, train_loss[loss=2.791, NarTop10Accuracy=0.767, over 4335.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7126, over 5988.64 frames. ], batch size: 5, lr: 2.75e-03 +2024-08-06 21:22:53,534 INFO [trainer.py:765] (4/8) Epoch 31, batch 1400, train_loss[loss=2.917, NarTop10Accuracy=0.7442, over 6108.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7108, over 6035.39 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (4/8) Epoch 31, batch 1500, train_loss[loss=3.395, NarTop10Accuracy=0.6437, over 6054.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7133, over 5948.87 frames. ], batch size: 53, lr: 2.74e-03 +2024-08-06 21:23:49,005 INFO [trainer.py:765] (4/8) Epoch 31, batch 1600, train_loss[loss=3.325, NarTop10Accuracy=0.6554, over 7347.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7146, over 5931.36 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,512 INFO [trainer.py:765] (4/8) Epoch 31, batch 1700, train_loss[loss=3.44, NarTop10Accuracy=0.6424, over 6231.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5931.41 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,996 INFO [trainer.py:765] (4/8) Epoch 31, batch 1800, train_loss[loss=2.8, NarTop10Accuracy=0.7746, over 6915.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.716, over 6002.60 frames. 
], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,357 INFO [trainer.py:765] (4/8) Epoch 31, batch 1900, train_loss[loss=3.233, NarTop10Accuracy=0.6855, over 5991.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7131, over 6032.47 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:25:33,773 INFO [trainer.py:765] (4/8) Epoch 31, batch 2000, train_loss[loss=3.029, NarTop10Accuracy=0.7255, over 5760.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7149, over 5995.51 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:25:59,106 INFO [trainer.py:765] (4/8) Epoch 31, batch 2100, train_loss[loss=2.756, NarTop10Accuracy=0.7752, over 4764.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7152, over 5983.00 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (4/8) Epoch 31, batch 2200, train_loss[loss=2.998, NarTop10Accuracy=0.7278, over 7290.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7173, over 6025.61 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (4/8) Epoch 31, batch 2300, train_loss[loss=2.816, NarTop10Accuracy=0.7636, over 5712.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7141, over 6040.57 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,607 INFO [trainer.py:765] (4/8) Epoch 31, batch 2400, train_loss[loss=2.916, NarTop10Accuracy=0.7451, over 5055.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7147, over 5776.56 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (4/8) Epoch 31, batch 2500, train_loss[loss=2.937, NarTop10Accuracy=0.7417, over 5625.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7175, over 5492.51 frames. ], batch size: 8, lr: 2.73e-03 +2024-08-06 21:27:57,405 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 21:28:49,392 INFO [trainer.py:765] (4/8) Epoch 32, batch 100, train_loss[loss=2.878, NarTop10Accuracy=0.7504, over 7425.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 2356.78 frames. ], batch size: 32, lr: 2.68e-03 +2024-08-06 21:29:08,160 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (4/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,272 INFO [trainer.py:765] (4/8) Epoch 32, batch 200, train_loss[loss=3.229, NarTop10Accuracy=0.6773, over 6852.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 3860.33 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,278 INFO [trainer.py:765] (4/8) Epoch 32, batch 300, train_loss[loss=3.124, NarTop10Accuracy=0.7065, over 7299.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 4653.59 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,102 INFO [trainer.py:765] (4/8) Epoch 32, batch 400, train_loss[loss=2.901, NarTop10Accuracy=0.7522, over 5292.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 5106.33 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,529 INFO [trainer.py:765] (4/8) Epoch 32, batch 500, train_loss[loss=3.172, NarTop10Accuracy=0.6922, over 6096.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5394.92 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,485 INFO [trainer.py:765] (4/8) Epoch 32, batch 600, train_loss[loss=3, NarTop10Accuracy=0.7107, over 5799.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7154, over 5661.76 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,028 INFO [trainer.py:765] (4/8) Epoch 32, batch 700, train_loss[loss=2.656, NarTop10Accuracy=0.7894, over 5040.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5726.62 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,646 INFO [trainer.py:765] (4/8) Epoch 32, batch 800, train_loss[loss=3.241, NarTop10Accuracy=0.67, over 4941.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7161, over 5773.04 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:28,990 INFO [trainer.py:765] (4/8) Epoch 32, batch 900, train_loss[loss=2.779, NarTop10Accuracy=0.7718, over 6174.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7189, over 5787.40 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,049 INFO [trainer.py:765] (4/8) Epoch 32, batch 1000, train_loss[loss=3.189, NarTop10Accuracy=0.6836, over 6312.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.717, over 5889.43 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (4/8) Epoch 32, batch 1100, train_loss[loss=3.116, NarTop10Accuracy=0.7002, over 6738.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5924.21 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (4/8) Epoch 32, batch 1200, train_loss[loss=3.178, NarTop10Accuracy=0.6906, over 7278.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7134, over 5910.36 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 21:35:52,800 INFO [trainer.py:765] (4/8) Epoch 32, batch 1300, train_loss[loss=3.139, NarTop10Accuracy=0.6973, over 5034.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 5991.54 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,478 INFO [trainer.py:765] (4/8) Epoch 32, batch 1400, train_loss[loss=3.39, NarTop10Accuracy=0.6439, over 5988.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7124, over 6022.31 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,733 INFO [trainer.py:765] (4/8) Epoch 32, batch 1500, train_loss[loss=3.475, NarTop10Accuracy=0.6307, over 6285.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7127, over 5950.74 frames. ], batch size: 51, lr: 2.66e-03 +2024-08-06 21:37:32,521 INFO [trainer.py:765] (4/8) Epoch 32, batch 1600, train_loss[loss=3.04, NarTop10Accuracy=0.7166, over 7125.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5936.02 frames. ], batch size: 22, lr: 2.66e-03 +2024-08-06 21:37:59,159 INFO [trainer.py:765] (4/8) Epoch 32, batch 1700, train_loss[loss=3.147, NarTop10Accuracy=0.6915, over 6336.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5907.70 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 21:38:25,702 INFO [trainer.py:765] (4/8) Epoch 32, batch 1800, train_loss[loss=3.017, NarTop10Accuracy=0.7172, over 6990.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7134, over 5967.11 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,169 INFO [trainer.py:765] (4/8) Epoch 32, batch 1900, train_loss[loss=3.037, NarTop10Accuracy=0.7142, over 6618.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7094, over 6019.03 frames. 
], batch size: 51, lr: 2.65e-03 +2024-08-06 21:39:17,768 INFO [trainer.py:765] (4/8) Epoch 32, batch 2000, train_loss[loss=3.465, NarTop10Accuracy=0.6361, over 6279.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.713, over 5995.00 frames. ], batch size: 51, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (4/8) Epoch 32, batch 2100, train_loss[loss=2.64, NarTop10Accuracy=0.7981, over 4860.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7134, over 5982.03 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 21:39:54,781 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 21:40:02,942 INFO [trainer.py:811] (4/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,629 INFO [trainer.py:765] (4/8) Epoch 32, batch 2200, train_loss[loss=3.148, NarTop10Accuracy=0.6995, over 7164.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7141, over 6016.36 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,718 INFO [trainer.py:765] (4/8) Epoch 32, batch 2300, train_loss[loss=3.229, NarTop10Accuracy=0.6759, over 5733.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.71, over 6029.65 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (4/8) Epoch 32, batch 2400, train_loss[loss=3.368, NarTop10Accuracy=0.6535, over 5154.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7138, over 5792.66 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,538 INFO [trainer.py:765] (4/8) Epoch 32, batch 2500, train_loss[loss=2.805, NarTop10Accuracy=0.7761, over 5169.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7191, over 5502.42 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,894 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 21:42:47,615 INFO [trainer.py:765] (4/8) Epoch 33, batch 100, train_loss[loss=3.027, NarTop10Accuracy=0.7252, over 7278.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.724, over 2361.08 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (4/8) Epoch 33, batch 200, train_loss[loss=2.607, NarTop10Accuracy=0.8004, over 6774.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7208, over 3856.26 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (4/8) Epoch 33, batch 300, train_loss[loss=3.424, NarTop10Accuracy=0.6305, over 6993.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7174, over 4653.87 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (4/8) Epoch 33, batch 400, train_loss[loss=2.729, NarTop10Accuracy=0.7903, over 5181.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 5112.87 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (4/8) Epoch 33, batch 500, train_loss[loss=2.738, NarTop10Accuracy=0.7864, over 6024.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7221, over 5391.26 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (4/8) Epoch 33, batch 600, train_loss[loss=3.328, NarTop10Accuracy=0.6513, over 5796.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7171, over 5639.76 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,316 INFO [trainer.py:765] (4/8) Epoch 33, batch 700, train_loss[loss=2.85, NarTop10Accuracy=0.7637, over 4914.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 5712.53 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (4/8) Epoch 33, batch 800, train_loss[loss=2.788, NarTop10Accuracy=0.7792, over 4950.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7166, over 5781.05 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (4/8) Epoch 33, batch 900, train_loss[loss=3.326, NarTop10Accuracy=0.6602, over 6711.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7158, over 5782.58 frames. ], batch size: 14, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (4/8) Epoch 33, batch 1000, train_loss[loss=2.979, NarTop10Accuracy=0.7255, over 6258.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7157, over 5898.63 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (4/8) Epoch 33, batch 1100, train_loss[loss=2.948, NarTop10Accuracy=0.7318, over 6783.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7117, over 5937.13 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,659 INFO [trainer.py:765] (4/8) Epoch 33, batch 1200, train_loss[loss=2.861, NarTop10Accuracy=0.7565, over 7383.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7139, over 5934.46 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,815 INFO [trainer.py:765] (4/8) Epoch 33, batch 1300, train_loss[loss=3.039, NarTop10Accuracy=0.716, over 4209.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7146, over 5985.76 frames. ], batch size: 5, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (4/8) Epoch 33, batch 1400, train_loss[loss=3.329, NarTop10Accuracy=0.6507, over 6165.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 6023.87 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (4/8) Epoch 33, batch 1500, train_loss[loss=3.071, NarTop10Accuracy=0.7084, over 6180.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7141, over 5960.54 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,606 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (4/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (4/8) Epoch 33, batch 1600, train_loss[loss=3.235, NarTop10Accuracy=0.673, over 7164.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7168, over 5918.04 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (4/8) Epoch 33, batch 1700, train_loss[loss=2.706, NarTop10Accuracy=0.7863, over 6228.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.715, over 5903.25 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (4/8) Epoch 33, batch 1800, train_loss[loss=2.801, NarTop10Accuracy=0.7639, over 6978.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 5971.86 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (4/8) Epoch 33, batch 1900, train_loss[loss=3.4, NarTop10Accuracy=0.642, over 6387.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7116, over 6013.39 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (4/8) Epoch 33, batch 2000, train_loss[loss=3.503, NarTop10Accuracy=0.6257, over 6066.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7179, over 6001.04 frames. ], batch size: 51, lr: 2.57e-03 +2024-08-06 21:53:31,659 INFO [trainer.py:765] (4/8) Epoch 33, batch 2100, train_loss[loss=3.304, NarTop10Accuracy=0.6628, over 3978.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5984.61 frames. ], batch size: 4, lr: 2.57e-03 +2024-08-06 21:53:56,891 INFO [trainer.py:765] (4/8) Epoch 33, batch 2200, train_loss[loss=3.489, NarTop10Accuracy=0.6236, over 7164.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7142, over 6027.20 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (4/8) Epoch 33, batch 2300, train_loss[loss=2.718, NarTop10Accuracy=0.7714, over 5673.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7145, over 6011.90 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,430 INFO [trainer.py:765] (4/8) Epoch 33, batch 2400, train_loss[loss=2.682, NarTop10Accuracy=0.7835, over 5256.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5768.78 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (4/8) Epoch 33, batch 2500, train_loss[loss=2.694, NarTop10Accuracy=0.7814, over 5097.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7203, over 5471.32 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,791 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (4/8) Epoch 34, batch 100, train_loss[loss=3.34, NarTop10Accuracy=0.6624, over 7140.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7174, over 2383.12 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (4/8) Epoch 34, batch 200, train_loss[loss=3.074, NarTop10Accuracy=0.698, over 6816.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7216, over 3869.75 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (4/8) Epoch 34, batch 300, train_loss[loss=2.889, NarTop10Accuracy=0.7526, over 7194.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7198, over 4683.29 frames. ], batch size: 22, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (4/8) Epoch 34, batch 400, train_loss[loss=3.275, NarTop10Accuracy=0.6601, over 5067.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7231, over 5115.84 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,689 INFO [trainer.py:765] (4/8) Epoch 34, batch 500, train_loss[loss=3.024, NarTop10Accuracy=0.7174, over 6063.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7205, over 5387.82 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (4/8) Epoch 34, batch 600, train_loss[loss=2.903, NarTop10Accuracy=0.7439, over 5682.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7188, over 5662.63 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (4/8) Epoch 34, batch 700, train_loss[loss=3.149, NarTop10Accuracy=0.6952, over 5097.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 5730.18 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (4/8) Epoch 34, batch 800, train_loss[loss=2.951, NarTop10Accuracy=0.7371, over 5097.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 5772.11 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (4/8) Epoch 34, batch 900, train_loss[loss=2.938, NarTop10Accuracy=0.7375, over 6558.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7191, over 5800.47 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (4/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:01:34,091 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,625 INFO [trainer.py:765] (4/8) Epoch 34, batch 1000, train_loss[loss=3.259, NarTop10Accuracy=0.6758, over 6630.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7173, over 5892.55 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (4/8) Epoch 34, batch 1100, train_loss[loss=3.344, NarTop10Accuracy=0.6637, over 6816.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5920.57 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (4/8) Epoch 34, batch 1200, train_loss[loss=3.032, NarTop10Accuracy=0.7228, over 7095.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7162, over 5923.10 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,814 INFO [trainer.py:765] (4/8) Epoch 34, batch 1300, train_loss[loss=2.677, NarTop10Accuracy=0.7961, over 5121.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 5974.34 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,950 INFO [trainer.py:765] (4/8) Epoch 34, batch 1400, train_loss[loss=3.267, NarTop10Accuracy=0.6612, over 6009.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 5995.36 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (4/8) Epoch 34, batch 1500, train_loss[loss=3.064, NarTop10Accuracy=0.7137, over 5715.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7167, over 5931.13 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,600 INFO [trainer.py:765] (4/8) Epoch 34, batch 1600, train_loss[loss=2.904, NarTop10Accuracy=0.7373, over 7278.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7154, over 5918.15 frames. ], batch size: 23, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (4/8) Epoch 34, batch 1700, train_loss[loss=3.18, NarTop10Accuracy=0.6857, over 6630.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7177, over 5927.14 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 22:05:41,721 INFO [trainer.py:765] (4/8) Epoch 34, batch 1800, train_loss[loss=3.403, NarTop10Accuracy=0.6471, over 6927.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7164, over 5988.91 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,207 INFO [trainer.py:765] (4/8) Epoch 34, batch 1900, train_loss[loss=3.046, NarTop10Accuracy=0.7231, over 6171.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7115, over 6019.29 frames. 
], batch size: 51, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (4/8) Epoch 34, batch 2000, train_loss[loss=3.046, NarTop10Accuracy=0.7147, over 6222.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7148, over 5991.67 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (4/8) Epoch 34, batch 2100, train_loss[loss=3.314, NarTop10Accuracy=0.6633, over 3864.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7117, over 5968.86 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (4/8) Epoch 34, batch 2200, train_loss[loss=2.831, NarTop10Accuracy=0.7599, over 7446.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.712, over 6021.31 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (4/8) Epoch 34, batch 2300, train_loss[loss=2.656, NarTop10Accuracy=0.7986, over 5709.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 6036.20 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (4/8) Epoch 34, batch 2400, train_loss[loss=3.481, NarTop10Accuracy=0.6323, over 5826.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7135, over 5791.17 frames. ], batch size: 8, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (4/8) Epoch 34, batch 2500, train_loss[loss=2.764, NarTop10Accuracy=0.7703, over 5115.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7205, over 5495.06 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,618 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (4/8) Epoch 35, batch 100, train_loss[loss=2.873, NarTop10Accuracy=0.7477, over 7347.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7175, over 2364.89 frames. ], batch size: 32, lr: 2.45e-03 +2024-08-06 22:10:29,698 INFO [trainer.py:765] (4/8) Epoch 35, batch 200, train_loss[loss=3.205, NarTop10Accuracy=0.6853, over 6822.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7136, over 3835.11 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (4/8) Epoch 35, batch 300, train_loss[loss=2.762, NarTop10Accuracy=0.7831, over 7230.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7192, over 4646.05 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (4/8) Epoch 35, batch 400, train_loss[loss=3.006, NarTop10Accuracy=0.7304, over 5229.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7202, over 5107.88 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,048 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (4/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (4/8) Epoch 35, batch 500, train_loss[loss=2.74, NarTop10Accuracy=0.7759, over 6042.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7231, over 5386.42 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,425 INFO [trainer.py:765] (4/8) Epoch 35, batch 600, train_loss[loss=3.408, NarTop10Accuracy=0.6441, over 5664.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5657.22 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (4/8) Epoch 35, batch 700, train_loss[loss=2.898, NarTop10Accuracy=0.7518, over 5118.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5734.90 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,384 INFO [trainer.py:765] (4/8) Epoch 35, batch 800, train_loss[loss=2.891, NarTop10Accuracy=0.7477, over 4965.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 5792.71 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,373 INFO [trainer.py:765] (4/8) Epoch 35, batch 900, train_loss[loss=3.11, NarTop10Accuracy=0.6978, over 6111.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7198, over 5801.60 frames. ], batch size: 13, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (4/8) Epoch 35, batch 1000, train_loss[loss=2.936, NarTop10Accuracy=0.7477, over 6306.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5887.15 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (4/8) Epoch 35, batch 1100, train_loss[loss=2.988, NarTop10Accuracy=0.7308, over 6894.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 5923.25 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,484 INFO [trainer.py:765] (4/8) Epoch 35, batch 1200, train_loss[loss=2.962, NarTop10Accuracy=0.7367, over 7143.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5917.17 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,061 INFO [trainer.py:765] (4/8) Epoch 35, batch 1300, train_loss[loss=2.952, NarTop10Accuracy=0.7299, over 4383.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5987.19 frames. ], batch size: 5, lr: 2.43e-03 +2024-08-06 22:17:31,061 INFO [trainer.py:765] (4/8) Epoch 35, batch 1400, train_loss[loss=3.094, NarTop10Accuracy=0.7067, over 6036.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7192, over 6012.92 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (4/8) Epoch 35, batch 1500, train_loss[loss=3.011, NarTop10Accuracy=0.7325, over 6375.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7175, over 5956.35 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (4/8) Epoch 35, batch 1600, train_loss[loss=2.833, NarTop10Accuracy=0.7589, over 7059.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5932.81 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,320 INFO [trainer.py:765] (4/8) Epoch 35, batch 1700, train_loss[loss=2.846, NarTop10Accuracy=0.7632, over 6177.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 5924.68 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,703 INFO [trainer.py:765] (4/8) Epoch 35, batch 1800, train_loss[loss=3.454, NarTop10Accuracy=0.6254, over 7137.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5965.68 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,202 INFO [trainer.py:765] (4/8) Epoch 35, batch 1900, train_loss[loss=3.262, NarTop10Accuracy=0.6741, over 5592.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5994.65 frames. ], batch size: 53, lr: 2.42e-03 +2024-08-06 22:20:15,763 INFO [trainer.py:765] (4/8) Epoch 35, batch 2000, train_loss[loss=2.987, NarTop10Accuracy=0.7268, over 6261.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7167, over 5980.51 frames. 
], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (4/8) Epoch 35, batch 2100, train_loss[loss=2.708, NarTop10Accuracy=0.7814, over 4002.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5944.87 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (4/8) Epoch 35, batch 2200, train_loss[loss=2.994, NarTop10Accuracy=0.7322, over 7293.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.714, over 6002.49 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (4/8) Epoch 35, batch 2300, train_loss[loss=2.989, NarTop10Accuracy=0.73, over 5736.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7143, over 6011.68 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (4/8) Epoch 35, batch 2400, train_loss[loss=3.248, NarTop10Accuracy=0.676, over 5208.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7155, over 5761.82 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,681 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (4/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,127 INFO [trainer.py:765] (4/8) Epoch 35, batch 2500, train_loss[loss=3.238, NarTop10Accuracy=0.6808, over 5049.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7197, over 5468.57 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:47,167 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 22:23:47,172 INFO [trainer.py:765] (4/8) Epoch 36, batch 100, train_loss[loss=3.256, NarTop10Accuracy=0.6793, over 7224.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7275, over 2363.42 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (4/8) Epoch 36, batch 200, train_loss[loss=2.847, NarTop10Accuracy=0.7521, over 6900.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7196, over 3848.51 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (4/8) Epoch 36, batch 300, train_loss[loss=3.287, NarTop10Accuracy=0.6656, over 7002.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7177, over 4658.37 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,276 INFO [trainer.py:765] (4/8) Epoch 36, batch 400, train_loss[loss=2.992, NarTop10Accuracy=0.7297, over 5112.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7211, over 5099.85 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (4/8) Epoch 36, batch 500, train_loss[loss=3.293, NarTop10Accuracy=0.6637, over 6066.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7209, over 5382.08 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,025 INFO [trainer.py:765] (4/8) Epoch 36, batch 600, train_loss[loss=3.018, NarTop10Accuracy=0.7316, over 5658.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5654.37 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,990 INFO [trainer.py:765] (4/8) Epoch 36, batch 700, train_loss[loss=3.196, NarTop10Accuracy=0.6964, over 5049.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.721, over 5738.49 frames. 
], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,915 INFO [trainer.py:765] (4/8) Epoch 36, batch 800, train_loss[loss=3.217, NarTop10Accuracy=0.6862, over 5043.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5777.18 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:28:17,812 INFO [trainer.py:765] (4/8) Epoch 36, batch 900, train_loss[loss=2.855, NarTop10Accuracy=0.7642, over 6204.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7198, over 5786.21 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:28:56,983 INFO [trainer.py:765] (4/8) Epoch 36, batch 1000, train_loss[loss=3.418, NarTop10Accuracy=0.6336, over 6255.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7201, over 5906.80 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (4/8) Epoch 36, batch 1100, train_loss[loss=2.949, NarTop10Accuracy=0.7363, over 6585.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 5936.96 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,681 INFO [trainer.py:765] (4/8) Epoch 36, batch 1200, train_loss[loss=3.016, NarTop10Accuracy=0.722, over 7455.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7199, over 5931.55 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,576 INFO [trainer.py:765] (4/8) Epoch 36, batch 1300, train_loss[loss=2.925, NarTop10Accuracy=0.7372, over 5067.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7182, over 5999.44 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (4/8) Epoch 36, batch 1400, train_loss[loss=3.06, NarTop10Accuracy=0.707, over 5997.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7217, over 6029.95 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (4/8) Epoch 36, batch 1500, train_loss[loss=3.441, NarTop10Accuracy=0.6392, over 6324.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7197, over 5959.34 frames. ], batch size: 51, lr: 2.36e-03 +2024-08-06 22:32:11,460 INFO [trainer.py:765] (4/8) Epoch 36, batch 1600, train_loss[loss=3.302, NarTop10Accuracy=0.6601, over 7011.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.72, over 5919.85 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,109 INFO [trainer.py:765] (4/8) Epoch 36, batch 1700, train_loss[loss=3.277, NarTop10Accuracy=0.6729, over 6228.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 5897.19 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (4/8) Epoch 36, batch 1800, train_loss[loss=3.263, NarTop10Accuracy=0.6752, over 6879.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7177, over 5969.22 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,170 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (4/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (4/8) Epoch 36, batch 1900, train_loss[loss=3.03, NarTop10Accuracy=0.7155, over 6357.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 6032.58 frames. 
], batch size: 52, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (4/8) Epoch 36, batch 2000, train_loss[loss=3.121, NarTop10Accuracy=0.7044, over 6417.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.718, over 6007.57 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (4/8) Epoch 36, batch 2100, train_loss[loss=2.574, NarTop10Accuracy=0.8028, over 3978.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7205, over 5974.25 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (4/8) Epoch 36, batch 2200, train_loss[loss=3.476, NarTop10Accuracy=0.6317, over 7347.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7164, over 6008.11 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (4/8) Epoch 36, batch 2300, train_loss[loss=3.308, NarTop10Accuracy=0.6717, over 5826.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 6010.95 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,600 INFO [trainer.py:765] (4/8) Epoch 36, batch 2400, train_loss[loss=3.27, NarTop10Accuracy=0.6749, over 5208.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5766.04 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (4/8) Epoch 36, batch 2500, train_loss[loss=2.748, NarTop10Accuracy=0.7793, over 5154.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7209, over 5463.98 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:28,978 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 22:37:29,725 INFO [trainer.py:765] (4/8) Epoch 37, batch 100, train_loss[loss=2.774, NarTop10Accuracy=0.7764, over 7125.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 2350.12 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,272 INFO [trainer.py:765] (4/8) Epoch 37, batch 200, train_loss[loss=2.85, NarTop10Accuracy=0.7613, over 6780.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7196, over 3853.10 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (4/8) Epoch 37, batch 300, train_loss[loss=3.259, NarTop10Accuracy=0.6754, over 7260.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7207, over 4659.06 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,307 INFO [trainer.py:765] (4/8) Epoch 37, batch 400, train_loss[loss=2.609, NarTop10Accuracy=0.8035, over 5199.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.724, over 5100.49 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,861 INFO [trainer.py:765] (4/8) Epoch 37, batch 500, train_loss[loss=3.402, NarTop10Accuracy=0.6435, over 6150.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7234, over 5372.75 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,333 INFO [trainer.py:765] (4/8) Epoch 37, batch 600, train_loss[loss=2.672, NarTop10Accuracy=0.7928, over 5586.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7224, over 5637.45 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,615 INFO [trainer.py:765] (4/8) Epoch 37, batch 700, train_loss[loss=3.139, NarTop10Accuracy=0.6935, over 4335.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7176, over 5708.04 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:41:30,564 INFO [trainer.py:765] (4/8) Epoch 37, batch 800, train_loss[loss=2.737, NarTop10Accuracy=0.7784, over 5271.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7174, over 5786.69 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,083 INFO [trainer.py:765] (4/8) Epoch 37, batch 900, train_loss[loss=2.79, NarTop10Accuracy=0.7692, over 6327.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7211, over 5807.61 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:42:38,267 INFO [trainer.py:765] (4/8) Epoch 37, batch 1000, train_loss[loss=3.071, NarTop10Accuracy=0.7094, over 6096.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5911.43 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (4/8) Epoch 37, batch 1100, train_loss[loss=2.937, NarTop10Accuracy=0.7423, over 6879.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5946.54 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (4/8) Epoch 37, batch 1200, train_loss[loss=2.812, NarTop10Accuracy=0.7608, over 7407.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.717, over 5918.52 frames. ], batch size: 32, lr: 2.30e-03 +2024-08-06 22:44:11,754 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (4/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,784 INFO [trainer.py:765] (4/8) Epoch 37, batch 1300, train_loss[loss=2.777, NarTop10Accuracy=0.7739, over 5061.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5985.15 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (4/8) Epoch 37, batch 1400, train_loss[loss=2.722, NarTop10Accuracy=0.7788, over 6084.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 6018.55 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,513 INFO [trainer.py:765] (4/8) Epoch 37, batch 1500, train_loss[loss=3.005, NarTop10Accuracy=0.722, over 5826.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7187, over 5946.77 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,438 INFO [trainer.py:765] (4/8) Epoch 37, batch 1600, train_loss[loss=3.347, NarTop10Accuracy=0.6523, over 7125.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5936.38 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,187 INFO [trainer.py:765] (4/8) Epoch 37, batch 1700, train_loss[loss=3.292, NarTop10Accuracy=0.6585, over 6486.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7181, over 5932.24 frames. ], batch size: 14, lr: 2.29e-03 +2024-08-06 22:47:01,793 INFO [trainer.py:765] (4/8) Epoch 37, batch 1800, train_loss[loss=2.829, NarTop10Accuracy=0.7642, over 7197.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7189, over 5993.70 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:47:28,312 INFO [trainer.py:765] (4/8) Epoch 37, batch 1900, train_loss[loss=3.092, NarTop10Accuracy=0.7075, over 5967.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 6038.93 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (4/8) Epoch 37, batch 2000, train_loss[loss=3.2, NarTop10Accuracy=0.6834, over 6075.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.72, over 6010.77 frames. 
], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,326 INFO [trainer.py:765] (4/8) Epoch 37, batch 2100, train_loss[loss=3.011, NarTop10Accuracy=0.7258, over 4746.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7187, over 5969.97 frames. ], batch size: 5, lr: 2.29e-03 +2024-08-06 22:48:44,707 INFO [trainer.py:765] (4/8) Epoch 37, batch 2200, train_loss[loss=2.993, NarTop10Accuracy=0.7209, over 7194.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7156, over 6003.60 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,913 INFO [trainer.py:765] (4/8) Epoch 37, batch 2300, train_loss[loss=2.799, NarTop10Accuracy=0.7735, over 5745.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7157, over 6008.40 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,319 INFO [trainer.py:765] (4/8) Epoch 37, batch 2400, train_loss[loss=3.236, NarTop10Accuracy=0.6728, over 5298.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7198, over 5758.04 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,861 INFO [trainer.py:765] (4/8) Epoch 37, batch 2500, train_loss[loss=3.182, NarTop10Accuracy=0.6964, over 5145.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7258, over 5471.14 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:18,227 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 22:51:16,151 INFO [trainer.py:765] (4/8) Epoch 38, batch 100, train_loss[loss=3.046, NarTop10Accuracy=0.7191, over 7260.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7237, over 2366.02 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,013 INFO [trainer.py:765] (4/8) Epoch 38, batch 200, train_loss[loss=3.255, NarTop10Accuracy=0.6808, over 6918.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7217, over 3844.48 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,202 INFO [trainer.py:765] (4/8) Epoch 38, batch 300, train_loss[loss=2.92, NarTop10Accuracy=0.7453, over 7272.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7182, over 4655.82 frames. ], batch size: 23, lr: 2.25e-03 +2024-08-06 22:52:55,626 INFO [trainer.py:765] (4/8) Epoch 38, batch 400, train_loss[loss=3.037, NarTop10Accuracy=0.7152, over 5118.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7224, over 5116.61 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,228 INFO [trainer.py:765] (4/8) Epoch 38, batch 500, train_loss[loss=2.857, NarTop10Accuracy=0.7574, over 6066.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7278, over 5387.24 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,497 INFO [trainer.py:765] (4/8) Epoch 38, batch 600, train_loss[loss=3.034, NarTop10Accuracy=0.7125, over 5754.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.7253, over 5656.67 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,002 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (4/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,659 INFO [trainer.py:765] (4/8) Epoch 38, batch 700, train_loss[loss=2.668, NarTop10Accuracy=0.7802, over 5130.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7239, over 5728.30 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,938 INFO [trainer.py:765] (4/8) Epoch 38, batch 800, train_loss[loss=2.785, NarTop10Accuracy=0.7618, over 5049.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.72, over 5797.30 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,704 INFO [trainer.py:765] (4/8) Epoch 38, batch 900, train_loss[loss=2.872, NarTop10Accuracy=0.7542, over 6129.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7212, over 5818.39 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,090 INFO [trainer.py:765] (4/8) Epoch 38, batch 1000, train_loss[loss=3.355, NarTop10Accuracy=0.6465, over 6771.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7211, over 5916.58 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:57:08,991 INFO [trainer.py:765] (4/8) Epoch 38, batch 1100, train_loss[loss=3.092, NarTop10Accuracy=0.7051, over 6693.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7189, over 5958.97 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,662 INFO [trainer.py:765] (4/8) Epoch 38, batch 1200, train_loss[loss=2.827, NarTop10Accuracy=0.7617, over 7359.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7196, over 5960.56 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,546 INFO [trainer.py:765] (4/8) Epoch 38, batch 1300, train_loss[loss=3.165, NarTop10Accuracy=0.6859, over 4941.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7191, over 6005.81 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (4/8) Epoch 38, batch 1400, train_loss[loss=2.849, NarTop10Accuracy=0.754, over 6042.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7149, over 6032.75 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,854 INFO [trainer.py:765] (4/8) Epoch 38, batch 1500, train_loss[loss=3.474, NarTop10Accuracy=0.6243, over 6039.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7188, over 5959.78 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,644 INFO [trainer.py:765] (4/8) Epoch 38, batch 1600, train_loss[loss=3.417, NarTop10Accuracy=0.6434, over 7365.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7177, over 5934.91 frames. ], batch size: 23, lr: 2.23e-03 +2024-08-06 23:00:17,315 INFO [trainer.py:765] (4/8) Epoch 38, batch 1700, train_loss[loss=2.993, NarTop10Accuracy=0.7326, over 6726.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7149, over 5933.25 frames. ], batch size: 14, lr: 2.23e-03 +2024-08-06 23:00:43,764 INFO [trainer.py:765] (4/8) Epoch 38, batch 1800, train_loss[loss=3.243, NarTop10Accuracy=0.6749, over 6867.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7153, over 6000.59 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,192 INFO [trainer.py:765] (4/8) Epoch 38, batch 1900, train_loss[loss=3.477, NarTop10Accuracy=0.6315, over 6237.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7143, over 6031.98 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (4/8) Epoch 38, batch 2000, train_loss[loss=3.302, NarTop10Accuracy=0.6634, over 6219.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7145, over 6001.05 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (4/8) Epoch 38, batch 2100, train_loss[loss=2.835, NarTop10Accuracy=0.7541, over 3999.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 5987.19 frames. 
], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,314 INFO [trainer.py:765] (4/8) Epoch 38, batch 2200, train_loss[loss=2.838, NarTop10Accuracy=0.7512, over 7464.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 6041.19 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,419 INFO [trainer.py:765] (4/8) Epoch 38, batch 2300, train_loss[loss=2.624, NarTop10Accuracy=0.7938, over 5736.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 6038.02 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,348 INFO [trainer.py:765] (4/8) Epoch 38, batch 2400, train_loss[loss=2.684, NarTop10Accuracy=0.7881, over 5298.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.719, over 5779.75 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,824 INFO [trainer.py:765] (4/8) Epoch 38, batch 2500, train_loss[loss=3.045, NarTop10Accuracy=0.7117, over 5280.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.724, over 5484.26 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,757 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 23:04:58,941 INFO [trainer.py:765] (4/8) Epoch 39, batch 100, train_loss[loss=3.25, NarTop10Accuracy=0.6796, over 7557.00 frames. ], tot_loss[loss=2.982, NarTop10Accuracy=0.7295, over 2381.74 frames. ], batch size: 32, lr: 2.19e-03 +2024-08-06 23:05:03,469 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (4/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (4/8) Epoch 39, batch 200, train_loss[loss=2.776, NarTop10Accuracy=0.773, over 6729.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7274, over 3856.35 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (4/8) Epoch 39, batch 300, train_loss[loss=3.1, NarTop10Accuracy=0.7136, over 6801.00 frames. ], tot_loss[loss=2.988, NarTop10Accuracy=0.7277, over 4658.06 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,275 INFO [trainer.py:765] (4/8) Epoch 39, batch 400, train_loss[loss=2.736, NarTop10Accuracy=0.7748, over 5199.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7278, over 5109.90 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,174 INFO [trainer.py:765] (4/8) Epoch 39, batch 500, train_loss[loss=3.329, NarTop10Accuracy=0.6564, over 6060.00 frames. ], tot_loss[loss=2.994, NarTop10Accuracy=0.7261, over 5408.21 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (4/8) Epoch 39, batch 600, train_loss[loss=2.702, NarTop10Accuracy=0.7824, over 5757.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7229, over 5667.27 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,694 INFO [trainer.py:765] (4/8) Epoch 39, batch 700, train_loss[loss=3.187, NarTop10Accuracy=0.6878, over 5085.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5735.75 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (4/8) Epoch 39, batch 800, train_loss[loss=2.653, NarTop10Accuracy=0.7938, over 4350.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.72, over 5777.27 frames. 
], batch size: 5, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (4/8) Epoch 39, batch 900, train_loss[loss=3.358, NarTop10Accuracy=0.6549, over 6180.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5794.32 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (4/8) Epoch 39, batch 1000, train_loss[loss=2.909, NarTop10Accuracy=0.7347, over 6672.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7221, over 5911.58 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (4/8) Epoch 39, batch 1100, train_loss[loss=2.748, NarTop10Accuracy=0.7813, over 6825.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7189, over 5949.91 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (4/8) Epoch 39, batch 1200, train_loss[loss=2.883, NarTop10Accuracy=0.7485, over 7017.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7203, over 5956.34 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,252 INFO [trainer.py:765] (4/8) Epoch 39, batch 1300, train_loss[loss=2.903, NarTop10Accuracy=0.7553, over 5229.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7219, over 6024.80 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,301 INFO [trainer.py:765] (4/8) Epoch 39, batch 1400, train_loss[loss=3.088, NarTop10Accuracy=0.7108, over 6105.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7215, over 6033.38 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (4/8) Epoch 39, batch 1500, train_loss[loss=3.626, NarTop10Accuracy=0.5982, over 6585.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7211, over 5961.23 frames. ], batch size: 53, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (4/8) Epoch 39, batch 1600, train_loss[loss=2.884, NarTop10Accuracy=0.7564, over 7107.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7234, over 5944.35 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,219 INFO [trainer.py:765] (4/8) Epoch 39, batch 1700, train_loss[loss=3.396, NarTop10Accuracy=0.6466, over 6672.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7191, over 5911.76 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 23:14:30,767 INFO [trainer.py:765] (4/8) Epoch 39, batch 1800, train_loss[loss=2.769, NarTop10Accuracy=0.7785, over 7212.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5969.11 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,179 INFO [trainer.py:765] (4/8) Epoch 39, batch 1900, train_loss[loss=2.98, NarTop10Accuracy=0.7312, over 5820.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 6026.19 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,750 INFO [trainer.py:765] (4/8) Epoch 39, batch 2000, train_loss[loss=3.241, NarTop10Accuracy=0.6778, over 6213.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 6010.64 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (4/8) Epoch 39, batch 2100, train_loss[loss=3.329, NarTop10Accuracy=0.6588, over 3831.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7201, over 5994.50 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (4/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (4/8) Epoch 39, batch 2200, train_loss[loss=3.174, NarTop10Accuracy=0.6927, over 7446.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 6034.82 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (4/8) Epoch 39, batch 2300, train_loss[loss=2.651, NarTop10Accuracy=0.7979, over 5820.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7169, over 6026.39 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (4/8) Epoch 39, batch 2400, train_loss[loss=2.687, NarTop10Accuracy=0.7792, over 4977.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7226, over 5774.96 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (4/8) Epoch 39, batch 2500, train_loss[loss=2.917, NarTop10Accuracy=0.7547, over 5175.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7271, over 5474.43 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,403 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (4/8) Epoch 40, batch 100, train_loss[loss=2.963, NarTop10Accuracy=0.729, over 6978.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7239, over 2358.12 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (4/8) Epoch 40, batch 200, train_loss[loss=2.831, NarTop10Accuracy=0.7614, over 6837.00 frames. ], tot_loss[loss=2.994, NarTop10Accuracy=0.7264, over 3850.55 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (4/8) Epoch 40, batch 300, train_loss[loss=2.838, NarTop10Accuracy=0.7656, over 6990.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.722, over 4663.71 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (4/8) Epoch 40, batch 400, train_loss[loss=2.784, NarTop10Accuracy=0.7696, over 5073.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7225, over 5117.30 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (4/8) Epoch 40, batch 500, train_loss[loss=2.649, NarTop10Accuracy=0.7964, over 6165.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7236, over 5396.49 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,881 INFO [trainer.py:765] (4/8) Epoch 40, batch 600, train_loss[loss=2.92, NarTop10Accuracy=0.7518, over 5856.00 frames. ], tot_loss[loss=2.998, NarTop10Accuracy=0.7255, over 5649.00 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (4/8) Epoch 40, batch 700, train_loss[loss=3.109, NarTop10Accuracy=0.7039, over 5190.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.7243, over 5709.88 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,753 INFO [trainer.py:765] (4/8) Epoch 40, batch 800, train_loss[loss=2.629, NarTop10Accuracy=0.8094, over 4944.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 5770.95 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (4/8) Epoch 40, batch 900, train_loss[loss=3.329, NarTop10Accuracy=0.6489, over 6438.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7215, over 5794.28 frames. 
], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (4/8) Epoch 40, batch 1000, train_loss[loss=3.427, NarTop10Accuracy=0.631, over 6711.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7201, over 5901.63 frames. ], batch size: 14, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (4/8) Epoch 40, batch 1100, train_loss[loss=2.648, NarTop10Accuracy=0.7906, over 6789.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5956.79 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (4/8) Epoch 40, batch 1200, train_loss[loss=2.958, NarTop10Accuracy=0.7335, over 7242.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7221, over 5941.27 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (4/8) Epoch 40, batch 1300, train_loss[loss=2.857, NarTop10Accuracy=0.749, over 5223.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7228, over 6019.29 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,384 INFO [trainer.py:765] (4/8) Epoch 40, batch 1400, train_loss[loss=2.756, NarTop10Accuracy=0.7748, over 6048.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7215, over 6017.88 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (4/8) Epoch 40, batch 1500, train_loss[loss=3.335, NarTop10Accuracy=0.6556, over 6141.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.724, over 5953.12 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (4/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30368MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (4/8) Epoch 40, batch 1600, train_loss[loss=2.971, NarTop10Accuracy=0.7263, over 7011.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7215, over 5925.46 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (4/8) Epoch 40, batch 1700, train_loss[loss=3.33, NarTop10Accuracy=0.6632, over 6609.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7224, over 5920.21 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 23:28:12,579 INFO [trainer.py:765] (4/8) Epoch 40, batch 1800, train_loss[loss=2.982, NarTop10Accuracy=0.7281, over 7161.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7265, over 5980.01 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (4/8) Epoch 40, batch 1900, train_loss[loss=3.187, NarTop10Accuracy=0.6933, over 6129.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.7252, over 6039.17 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,445 INFO [trainer.py:765] (4/8) Epoch 40, batch 2000, train_loss[loss=3.565, NarTop10Accuracy=0.5999, over 6261.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7243, over 6008.91 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (4/8) Epoch 40, batch 2100, train_loss[loss=2.694, NarTop10Accuracy=0.7824, over 3882.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7227, over 5969.87 frames. 
], batch size: 4, lr: 2.11e-03 +2024-08-06 23:29:54,939 INFO [trainer.py:765] (4/8) Epoch 40, batch 2200, train_loss[loss=3.24, NarTop10Accuracy=0.6771, over 7335.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7199, over 5993.98 frames. ], batch size: 32, lr: 2.11e-03 +2024-08-06 23:30:20,013 INFO [trainer.py:765] (4/8) Epoch 40, batch 2300, train_loss[loss=2.869, NarTop10Accuracy=0.7563, over 5808.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7192, over 5989.51 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,296 INFO [trainer.py:765] (4/8) Epoch 40, batch 2400, train_loss[loss=2.792, NarTop10Accuracy=0.7659, over 5118.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7203, over 5733.71 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (4/8) Epoch 40, batch 2500, train_loss[loss=3.047, NarTop10Accuracy=0.7081, over 5169.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7276, over 5451.86 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:28,035 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 23:31:28,038 INFO [trainer.py:1069] (4/8) Done! diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-5 b/libritts-r/log/log-train-2024-08-06-14-23-41-5 new file mode 100644 index 0000000000000000000000000000000000000000..06221bde1a2f7231778863ccfe5b55ca8eaca10e --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-5 @@ -0,0 +1,1260 @@ +2024-08-06 14:23:41,788 INFO [trainer.py:870] (5/8) Training started +2024-08-06 14:23:41,789 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 14:23:41,790 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,790 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 14:23:42,582 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,582 INFO [checkpoint.py:112] (5/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,598 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 14:23:49,643 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 14:23:49,645 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 14:23:49,646 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 14:23:49,646 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 14:23:49,646 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,265 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 14:23:50,266 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 14:23:50,597 INFO [datamodule.py:388] (5/8) About to create dev dataloader +2024-08-06 14:24:38,248 INFO [trainer.py:765] (5/8) Epoch 1, batch 100, train_loss[loss=107.4, NarTop10Accuracy=0.02412, over 7362.00 frames. ], tot_loss[loss=74.42, NarTop10Accuracy=0.0463, over 2355.37 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (5/8) Epoch 1, batch 200, train_loss[loss=133.4, NarTop10Accuracy=0.01583, over 6780.00 frames. ], tot_loss[loss=97.51, NarTop10Accuracy=0.04142, over 3853.30 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,110 INFO [trainer.py:765] (5/8) Epoch 1, batch 300, train_loss[loss=103.5, NarTop10Accuracy=0.02536, over 7077.00 frames. ], tot_loss[loss=85.29, NarTop10Accuracy=0.04331, over 4644.95 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,482 INFO [trainer.py:765] (5/8) Epoch 1, batch 400, train_loss[loss=52.49, NarTop10Accuracy=0.02052, over 5133.00 frames. ], tot_loss[loss=67.83, NarTop10Accuracy=0.04752, over 5102.37 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,356 INFO [trainer.py:765] (5/8) Epoch 1, batch 500, train_loss[loss=14.22, NarTop10Accuracy=0.0275, over 5898.00 frames. ], tot_loss[loss=48.93, NarTop10Accuracy=0.04979, over 5392.11 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:03,999 INFO [trainer.py:765] (5/8) Epoch 1, batch 600, train_loss[loss=6.084, NarTop10Accuracy=0.193, over 5769.00 frames. ], tot_loss[loss=33.42, NarTop10Accuracy=0.05616, over 5648.88 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (5/8) Epoch 1, batch 700, train_loss[loss=6.762, NarTop10Accuracy=0.1191, over 4995.00 frames. ], tot_loss[loss=23.37, NarTop10Accuracy=0.06458, over 5727.25 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 14:28:08,832 INFO [trainer.py:765] (5/8) Epoch 1, batch 800, train_loss[loss=6.323, NarTop10Accuracy=0.1641, over 4941.00 frames. ], tot_loss[loss=17.16, NarTop10Accuracy=0.08561, over 5771.70 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,758 INFO [trainer.py:765] (5/8) Epoch 1, batch 900, train_loss[loss=5.811, NarTop10Accuracy=0.1644, over 6306.00 frames. ], tot_loss[loss=12.76, NarTop10Accuracy=0.1141, over 5815.38 frames. 
], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,586 INFO [trainer.py:765] (5/8) Epoch 1, batch 1000, train_loss[loss=5.705, NarTop10Accuracy=0.1982, over 6585.00 frames. ], tot_loss[loss=10.08, NarTop10Accuracy=0.1347, over 5924.60 frames. ], batch size: 14, lr: 2.97e-02 +2024-08-06 14:29:42,825 INFO [trainer.py:765] (5/8) Epoch 1, batch 1100, train_loss[loss=5.749, NarTop10Accuracy=0.1885, over 6822.00 frames. ], tot_loss[loss=8.399, NarTop10Accuracy=0.153, over 5971.23 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,468 INFO [trainer.py:765] (5/8) Epoch 1, batch 1200, train_loss[loss=5.848, NarTop10Accuracy=0.179, over 7533.00 frames. ], tot_loss[loss=7.338, NarTop10Accuracy=0.1715, over 5960.66 frames. ], batch size: 32, lr: 2.96e-02 +2024-08-06 14:30:48,747 INFO [trainer.py:765] (5/8) Epoch 1, batch 1300, train_loss[loss=5.243, NarTop10Accuracy=0.3136, over 4263.00 frames. ], tot_loss[loss=6.68, NarTop10Accuracy=0.1858, over 6012.91 frames. ], batch size: 5, lr: 2.95e-02 +2024-08-06 14:31:18,144 INFO [trainer.py:765] (5/8) Epoch 1, batch 1400, train_loss[loss=5.495, NarTop10Accuracy=0.2389, over 6045.00 frames. ], tot_loss[loss=6.248, NarTop10Accuracy=0.1976, over 6016.35 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,026 INFO [trainer.py:765] (5/8) Epoch 1, batch 1500, train_loss[loss=5.723, NarTop10Accuracy=0.1865, over 6102.00 frames. ], tot_loss[loss=5.968, NarTop10Accuracy=0.2094, over 5938.71 frames. ], batch size: 52, lr: 2.94e-02 +2024-08-06 14:32:13,691 INFO [trainer.py:765] (5/8) Epoch 1, batch 1600, train_loss[loss=5.443, NarTop10Accuracy=0.2313, over 7119.00 frames. ], tot_loss[loss=5.786, NarTop10Accuracy=0.2179, over 5921.42 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,197 INFO [trainer.py:765] (5/8) Epoch 1, batch 1700, train_loss[loss=5.442, NarTop10Accuracy=0.2396, over 6690.00 frames. ], tot_loss[loss=5.666, NarTop10Accuracy=0.225, over 5918.96 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 14:33:06,498 INFO [trainer.py:765] (5/8) Epoch 1, batch 1800, train_loss[loss=5.43, NarTop10Accuracy=0.2447, over 7032.00 frames. ], tot_loss[loss=5.566, NarTop10Accuracy=0.2345, over 5988.46 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (5/8) Epoch 1, batch 1900, train_loss[loss=5.679, NarTop10Accuracy=0.1946, over 6546.00 frames. ], tot_loss[loss=5.505, NarTop10Accuracy=0.2415, over 6032.02 frames. ], batch size: 52, lr: 2.90e-02 +2024-08-06 14:33:58,014 INFO [trainer.py:765] (5/8) Epoch 1, batch 2000, train_loss[loss=5.454, NarTop10Accuracy=0.2432, over 6078.00 frames. ], tot_loss[loss=5.445, NarTop10Accuracy=0.25, over 6003.75 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 14:33:58,016 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (5/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,063 INFO [trainer.py:765] (5/8) Epoch 1, batch 2100, train_loss[loss=5.151, NarTop10Accuracy=0.3005, over 3945.00 frames. ], tot_loss[loss=5.381, NarTop10Accuracy=0.2608, over 5974.03 frames. 
], batch size: 4, lr: 2.88e-02 +2024-08-06 14:34:57,303 INFO [trainer.py:765] (5/8) Epoch 1, batch 2200, train_loss[loss=5.376, NarTop10Accuracy=0.2529, over 7161.00 frames. ], tot_loss[loss=5.349, NarTop10Accuracy=0.265, over 6006.07 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 14:35:22,455 INFO [trainer.py:765] (5/8) Epoch 1, batch 2300, train_loss[loss=5.072, NarTop10Accuracy=0.31, over 5751.00 frames. ], tot_loss[loss=5.332, NarTop10Accuracy=0.2675, over 6001.36 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,815 INFO [trainer.py:765] (5/8) Epoch 1, batch 2400, train_loss[loss=5.309, NarTop10Accuracy=0.2749, over 5136.00 frames. ], tot_loss[loss=5.28, NarTop10Accuracy=0.2767, over 5754.47 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (5/8) Epoch 1, batch 2500, train_loss[loss=4.91, NarTop10Accuracy=0.3376, over 5145.00 frames. ], tot_loss[loss=5.221, NarTop10Accuracy=0.2872, over 5451.86 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,220 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 14:37:29,669 INFO [trainer.py:765] (5/8) Epoch 2, batch 100, train_loss[loss=4.942, NarTop10Accuracy=0.352, over 7035.00 frames. ], tot_loss[loss=5.177, NarTop10Accuracy=0.297, over 2361.52 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,015 INFO [trainer.py:765] (5/8) Epoch 2, batch 200, train_loss[loss=5.018, NarTop10Accuracy=0.3295, over 7203.00 frames. ], tot_loss[loss=5.148, NarTop10Accuracy=0.3024, over 3852.01 frames. ], batch size: 18, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (5/8) Epoch 2, batch 300, train_loss[loss=5.208, NarTop10Accuracy=0.2862, over 7251.00 frames. ], tot_loss[loss=5.131, NarTop10Accuracy=0.3049, over 4640.92 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:06,999 INFO [trainer.py:765] (5/8) Epoch 2, batch 400, train_loss[loss=4.989, NarTop10Accuracy=0.3266, over 5664.00 frames. ], tot_loss[loss=5.108, NarTop10Accuracy=0.3087, over 5084.47 frames. ], batch size: 8, lr: 2.74e-02 +2024-08-06 14:39:46,119 INFO [trainer.py:765] (5/8) Epoch 2, batch 500, train_loss[loss=4.903, NarTop10Accuracy=0.3488, over 6189.00 frames. ], tot_loss[loss=5.07, NarTop10Accuracy=0.3163, over 5371.20 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,083 INFO [trainer.py:765] (5/8) Epoch 2, batch 600, train_loss[loss=4.987, NarTop10Accuracy=0.3423, over 5598.00 frames. ], tot_loss[loss=5.047, NarTop10Accuracy=0.3208, over 5647.76 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (5/8) Epoch 2, batch 700, train_loss[loss=5.113, NarTop10Accuracy=0.3113, over 5073.00 frames. ], tot_loss[loss=5.035, NarTop10Accuracy=0.3221, over 5716.65 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,513 INFO [trainer.py:765] (5/8) Epoch 2, batch 800, train_loss[loss=5.183, NarTop10Accuracy=0.2937, over 4230.00 frames. ], tot_loss[loss=5.012, NarTop10Accuracy=0.3264, over 5779.24 frames. ], batch size: 5, lr: 2.69e-02 +2024-08-06 14:41:54,405 INFO [trainer.py:765] (5/8) Epoch 2, batch 900, train_loss[loss=4.826, NarTop10Accuracy=0.3598, over 6090.00 frames. ], tot_loss[loss=4.977, NarTop10Accuracy=0.3333, over 5817.09 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 14:42:23,902 INFO [trainer.py:765] (5/8) Epoch 2, batch 1000, train_loss[loss=4.863, NarTop10Accuracy=0.3586, over 6276.00 frames. ], tot_loss[loss=4.947, NarTop10Accuracy=0.3394, over 5912.25 frames. 
], batch size: 13, lr: 2.66e-02 +2024-08-06 14:42:56,254 INFO [trainer.py:765] (5/8) Epoch 2, batch 1100, train_loss[loss=5.001, NarTop10Accuracy=0.3167, over 6828.00 frames. ], tot_loss[loss=4.931, NarTop10Accuracy=0.3425, over 5940.72 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,186 INFO [trainer.py:765] (5/8) Epoch 2, batch 1200, train_loss[loss=4.825, NarTop10Accuracy=0.3607, over 7302.00 frames. ], tot_loss[loss=4.91, NarTop10Accuracy=0.3461, over 5925.62 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,346 INFO [trainer.py:765] (5/8) Epoch 2, batch 1300, train_loss[loss=4.815, NarTop10Accuracy=0.3583, over 5034.00 frames. ], tot_loss[loss=4.859, NarTop10Accuracy=0.3555, over 5994.43 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,728 INFO [trainer.py:765] (5/8) Epoch 2, batch 1400, train_loss[loss=4.918, NarTop10Accuracy=0.343, over 6057.00 frames. ], tot_loss[loss=4.842, NarTop10Accuracy=0.3586, over 6022.21 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,441 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (5/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (5/8) Epoch 2, batch 1500, train_loss[loss=4.717, NarTop10Accuracy=0.3793, over 5871.00 frames. ], tot_loss[loss=4.83, NarTop10Accuracy=0.361, over 5950.10 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,660 INFO [trainer.py:765] (5/8) Epoch 2, batch 1600, train_loss[loss=4.688, NarTop10Accuracy=0.3879, over 6936.00 frames. ], tot_loss[loss=4.803, NarTop10Accuracy=0.3661, over 5933.27 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (5/8) Epoch 2, batch 1700, train_loss[loss=4.729, NarTop10Accuracy=0.3761, over 6096.00 frames. ], tot_loss[loss=4.795, NarTop10Accuracy=0.3677, over 5914.20 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (5/8) Epoch 2, batch 1800, train_loss[loss=4.65, NarTop10Accuracy=0.3934, over 7053.00 frames. ], tot_loss[loss=4.771, NarTop10Accuracy=0.3727, over 5978.84 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (5/8) Epoch 2, batch 1900, train_loss[loss=4.635, NarTop10Accuracy=0.4024, over 6549.00 frames. ], tot_loss[loss=4.746, NarTop10Accuracy=0.3769, over 6028.81 frames. ], batch size: 51, lr: 2.55e-02 +2024-08-06 14:47:23,234 INFO [trainer.py:765] (5/8) Epoch 2, batch 2000, train_loss[loss=4.754, NarTop10Accuracy=0.3748, over 5919.00 frames. ], tot_loss[loss=4.726, NarTop10Accuracy=0.3804, over 6019.84 frames. ], batch size: 50, lr: 2.54e-02 +2024-08-06 14:47:48,589 INFO [trainer.py:765] (5/8) Epoch 2, batch 2100, train_loss[loss=4.507, NarTop10Accuracy=0.4222, over 3915.00 frames. ], tot_loss[loss=4.715, NarTop10Accuracy=0.3824, over 5978.83 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (5/8) Epoch 2, batch 2200, train_loss[loss=4.623, NarTop10Accuracy=0.3993, over 7293.00 frames. ], tot_loss[loss=4.677, NarTop10Accuracy=0.3897, over 6014.68 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,952 INFO [trainer.py:765] (5/8) Epoch 2, batch 2300, train_loss[loss=4.684, NarTop10Accuracy=0.3801, over 5745.00 frames. ], tot_loss[loss=4.678, NarTop10Accuracy=0.3896, over 6024.93 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,319 INFO [trainer.py:765] (5/8) Epoch 2, batch 2400, train_loss[loss=4.346, NarTop10Accuracy=0.4574, over 5106.00 frames. ], tot_loss[loss=4.635, NarTop10Accuracy=0.3979, over 5772.97 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,868 INFO [trainer.py:765] (5/8) Epoch 2, batch 2500, train_loss[loss=4.768, NarTop10Accuracy=0.3604, over 5052.00 frames. ], tot_loss[loss=4.614, NarTop10Accuracy=0.4021, over 5469.94 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,850 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 14:50:51,115 INFO [trainer.py:765] (5/8) Epoch 3, batch 100, train_loss[loss=4.725, NarTop10Accuracy=0.3843, over 7266.00 frames. ], tot_loss[loss=4.591, NarTop10Accuracy=0.4066, over 2365.23 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,386 INFO [trainer.py:765] (5/8) Epoch 3, batch 200, train_loss[loss=4.545, NarTop10Accuracy=0.4176, over 6867.00 frames. ], tot_loss[loss=4.545, NarTop10Accuracy=0.416, over 3849.14 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,953 INFO [trainer.py:765] (5/8) Epoch 3, batch 300, train_loss[loss=4.751, NarTop10Accuracy=0.3667, over 7086.00 frames. ], tot_loss[loss=4.524, NarTop10Accuracy=0.4197, over 4653.17 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,357 INFO [trainer.py:765] (5/8) Epoch 3, batch 400, train_loss[loss=4.484, NarTop10Accuracy=0.4256, over 4986.00 frames. ], tot_loss[loss=4.507, NarTop10Accuracy=0.4235, over 5088.91 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,678 INFO [trainer.py:765] (5/8) Epoch 3, batch 500, train_loss[loss=4.402, NarTop10Accuracy=0.4569, over 6144.00 frames. ], tot_loss[loss=4.492, NarTop10Accuracy=0.4261, over 5392.24 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,550 INFO [trainer.py:765] (5/8) Epoch 3, batch 600, train_loss[loss=4.29, NarTop10Accuracy=0.4747, over 5670.00 frames. ], tot_loss[loss=4.479, NarTop10Accuracy=0.4288, over 5658.64 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,464 INFO [trainer.py:765] (5/8) Epoch 3, batch 700, train_loss[loss=3.986, NarTop10Accuracy=0.5259, over 5016.00 frames. ], tot_loss[loss=4.454, NarTop10Accuracy=0.4337, over 5735.88 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 14:54:44,784 INFO [trainer.py:765] (5/8) Epoch 3, batch 800, train_loss[loss=4.101, NarTop10Accuracy=0.5069, over 4443.00 frames. ], tot_loss[loss=4.431, NarTop10Accuracy=0.4383, over 5787.12 frames. ], batch size: 5, lr: 2.28e-02 +2024-08-06 14:54:58,683 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (5/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,052 INFO [trainer.py:765] (5/8) Epoch 3, batch 900, train_loss[loss=4.073, NarTop10Accuracy=0.5084, over 6633.00 frames. ], tot_loss[loss=4.403, NarTop10Accuracy=0.4436, over 5803.11 frames. 
], batch size: 14, lr: 2.26e-02 +2024-08-06 14:56:04,958 INFO [trainer.py:765] (5/8) Epoch 3, batch 1000, train_loss[loss=4.278, NarTop10Accuracy=0.4642, over 6216.00 frames. ], tot_loss[loss=4.382, NarTop10Accuracy=0.4475, over 5908.88 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (5/8) Epoch 3, batch 1100, train_loss[loss=4.579, NarTop10Accuracy=0.4015, over 6753.00 frames. ], tot_loss[loss=4.356, NarTop10Accuracy=0.4527, over 5940.87 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (5/8) Epoch 3, batch 1200, train_loss[loss=4.395, NarTop10Accuracy=0.4402, over 7308.00 frames. ], tot_loss[loss=4.341, NarTop10Accuracy=0.4558, over 5930.22 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,631 INFO [trainer.py:765] (5/8) Epoch 3, batch 1300, train_loss[loss=4.197, NarTop10Accuracy=0.479, over 4365.00 frames. ], tot_loss[loss=4.312, NarTop10Accuracy=0.4617, over 5984.52 frames. ], batch size: 5, lr: 2.22e-02 +2024-08-06 14:58:22,900 INFO [trainer.py:765] (5/8) Epoch 3, batch 1400, train_loss[loss=4.213, NarTop10Accuracy=0.4778, over 6009.00 frames. ], tot_loss[loss=4.3, NarTop10Accuracy=0.4638, over 6017.12 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,856 INFO [trainer.py:765] (5/8) Epoch 3, batch 1500, train_loss[loss=4.373, NarTop10Accuracy=0.4451, over 6138.00 frames. ], tot_loss[loss=4.282, NarTop10Accuracy=0.4672, over 5956.63 frames. ], batch size: 51, lr: 2.20e-02 +2024-08-06 14:59:18,715 INFO [trainer.py:765] (5/8) Epoch 3, batch 1600, train_loss[loss=4.063, NarTop10Accuracy=0.5136, over 7230.00 frames. ], tot_loss[loss=4.264, NarTop10Accuracy=0.4706, over 5935.02 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,953 INFO [trainer.py:765] (5/8) Epoch 3, batch 1700, train_loss[loss=3.983, NarTop10Accuracy=0.525, over 6174.00 frames. ], tot_loss[loss=4.237, NarTop10Accuracy=0.4761, over 5929.12 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (5/8) Epoch 3, batch 1800, train_loss[loss=3.92, NarTop10Accuracy=0.5351, over 6918.00 frames. ], tot_loss[loss=4.215, NarTop10Accuracy=0.4805, over 5979.17 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,949 INFO [trainer.py:765] (5/8) Epoch 3, batch 1900, train_loss[loss=4.709, NarTop10Accuracy=0.3867, over 6465.00 frames. ], tot_loss[loss=4.194, NarTop10Accuracy=0.4848, over 6030.99 frames. ], batch size: 50, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (5/8) Epoch 3, batch 2000, train_loss[loss=4.452, NarTop10Accuracy=0.4298, over 5880.00 frames. ], tot_loss[loss=4.169, NarTop10Accuracy=0.4897, over 6005.27 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,899 INFO [trainer.py:765] (5/8) Epoch 3, batch 2100, train_loss[loss=3.988, NarTop10Accuracy=0.5281, over 3930.00 frames. ], tot_loss[loss=4.145, NarTop10Accuracy=0.4943, over 5981.38 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,183 INFO [trainer.py:765] (5/8) Epoch 3, batch 2200, train_loss[loss=3.946, NarTop10Accuracy=0.5436, over 7266.00 frames. ], tot_loss[loss=4.12, NarTop10Accuracy=0.5, over 6008.25 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (5/8) Epoch 3, batch 2300, train_loss[loss=4.324, NarTop10Accuracy=0.4565, over 5721.00 frames. ], tot_loss[loss=4.128, NarTop10Accuracy=0.4985, over 6005.06 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,663 INFO [trainer.py:765] (5/8) Epoch 3, batch 2400, train_loss[loss=4.22, NarTop10Accuracy=0.4793, over 5082.00 frames. ], tot_loss[loss=4.096, NarTop10Accuracy=0.5045, over 5761.61 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (5/8) Epoch 3, batch 2500, train_loss[loss=3.737, NarTop10Accuracy=0.5817, over 5097.00 frames. ], tot_loss[loss=4.043, NarTop10Accuracy=0.5153, over 5456.22 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,319 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:04:28,130 INFO [trainer.py:765] (5/8) Epoch 4, batch 100, train_loss[loss=3.91, NarTop10Accuracy=0.5434, over 7158.00 frames. ], tot_loss[loss=4.04, NarTop10Accuracy=0.517, over 2359.48 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,842 INFO [trainer.py:765] (5/8) Epoch 4, batch 200, train_loss[loss=3.845, NarTop10Accuracy=0.5585, over 6858.00 frames. ], tot_loss[loss=4.008, NarTop10Accuracy=0.5237, over 3845.46 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,509 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:05:36,237 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,888 INFO [trainer.py:765] (5/8) Epoch 4, batch 300, train_loss[loss=3.761, NarTop10Accuracy=0.5753, over 7119.00 frames. ], tot_loss[loss=3.988, NarTop10Accuracy=0.5275, over 4657.51 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,123 INFO [trainer.py:765] (5/8) Epoch 4, batch 400, train_loss[loss=3.826, NarTop10Accuracy=0.5654, over 5100.00 frames. ], tot_loss[loss=3.995, NarTop10Accuracy=0.526, over 5088.45 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,472 INFO [trainer.py:765] (5/8) Epoch 4, batch 500, train_loss[loss=3.947, NarTop10Accuracy=0.5312, over 6159.00 frames. ], tot_loss[loss=3.98, NarTop10Accuracy=0.5288, over 5364.91 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,817 INFO [trainer.py:765] (5/8) Epoch 4, batch 600, train_loss[loss=3.629, NarTop10Accuracy=0.6053, over 5760.00 frames. ], tot_loss[loss=3.979, NarTop10Accuracy=0.5291, over 5642.97 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (5/8) Epoch 4, batch 700, train_loss[loss=4.27, NarTop10Accuracy=0.4648, over 4281.00 frames. ], tot_loss[loss=3.969, NarTop10Accuracy=0.5309, over 5715.78 frames. ], batch size: 5, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (5/8) Epoch 4, batch 800, train_loss[loss=3.615, NarTop10Accuracy=0.6013, over 5157.00 frames. ], tot_loss[loss=3.959, NarTop10Accuracy=0.5325, over 5763.99 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,688 INFO [trainer.py:765] (5/8) Epoch 4, batch 900, train_loss[loss=3.553, NarTop10Accuracy=0.6192, over 6297.00 frames. ], tot_loss[loss=3.918, NarTop10Accuracy=0.541, over 5789.14 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,075 INFO [trainer.py:765] (5/8) Epoch 4, batch 1000, train_loss[loss=3.528, NarTop10Accuracy=0.6315, over 6180.00 frames. ], tot_loss[loss=3.908, NarTop10Accuracy=0.5433, over 5883.31 frames. 
], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,138 INFO [trainer.py:765] (5/8) Epoch 4, batch 1100, train_loss[loss=3.701, NarTop10Accuracy=0.5849, over 6870.00 frames. ], tot_loss[loss=3.903, NarTop10Accuracy=0.5442, over 5936.93 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (5/8) Epoch 4, batch 1200, train_loss[loss=4.317, NarTop10Accuracy=0.4607, over 7401.00 frames. ], tot_loss[loss=3.901, NarTop10Accuracy=0.5447, over 5944.68 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,073 INFO [trainer.py:765] (5/8) Epoch 4, batch 1300, train_loss[loss=3.487, NarTop10Accuracy=0.6274, over 5199.00 frames. ], tot_loss[loss=3.857, NarTop10Accuracy=0.5534, over 5997.07 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,687 INFO [trainer.py:765] (5/8) Epoch 4, batch 1400, train_loss[loss=3.757, NarTop10Accuracy=0.5868, over 6150.00 frames. ], tot_loss[loss=3.86, NarTop10Accuracy=0.5529, over 6011.03 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (5/8) Epoch 4, batch 1500, train_loss[loss=3.842, NarTop10Accuracy=0.5593, over 5901.00 frames. ], tot_loss[loss=3.858, NarTop10Accuracy=0.553, over 5943.86 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (5/8) Epoch 4, batch 1600, train_loss[loss=3.843, NarTop10Accuracy=0.5658, over 6975.00 frames. ], tot_loss[loss=3.849, NarTop10Accuracy=0.555, over 5931.23 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,132 INFO [trainer.py:765] (5/8) Epoch 4, batch 1700, train_loss[loss=3.841, NarTop10Accuracy=0.5622, over 6690.00 frames. ], tot_loss[loss=3.827, NarTop10Accuracy=0.5594, over 5926.85 frames. ], batch size: 14, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (5/8) Epoch 4, batch 1800, train_loss[loss=3.583, NarTop10Accuracy=0.6071, over 7107.00 frames. ], tot_loss[loss=3.826, NarTop10Accuracy=0.5596, over 6005.17 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,997 INFO [trainer.py:765] (5/8) Epoch 4, batch 1900, train_loss[loss=3.738, NarTop10Accuracy=0.5782, over 5901.00 frames. ], tot_loss[loss=3.844, NarTop10Accuracy=0.5559, over 6038.21 frames. ], batch size: 51, lr: 1.82e-02 +2024-08-06 15:14:46,671 INFO [trainer.py:765] (5/8) Epoch 4, batch 2000, train_loss[loss=3.677, NarTop10Accuracy=0.5884, over 6417.00 frames. ], tot_loss[loss=3.813, NarTop10Accuracy=0.5622, over 6011.48 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,858 INFO [trainer.py:765] (5/8) Epoch 4, batch 2100, train_loss[loss=3.551, NarTop10Accuracy=0.6175, over 4809.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5641, over 5970.65 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (5/8) Epoch 4, batch 2200, train_loss[loss=3.744, NarTop10Accuracy=0.5798, over 6954.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5657, over 6006.64 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,087 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:16:03,740 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (5/8) Epoch 4, batch 2300, train_loss[loss=3.493, NarTop10Accuracy=0.6264, over 5778.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5641, over 6013.21 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (5/8) Epoch 4, batch 2400, train_loss[loss=3.482, NarTop10Accuracy=0.6389, over 5190.00 frames. ], tot_loss[loss=3.776, NarTop10Accuracy=0.5696, over 5780.27 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,534 INFO [trainer.py:765] (5/8) Epoch 4, batch 2500, train_loss[loss=3.615, NarTop10Accuracy=0.6169, over 5193.00 frames. ], tot_loss[loss=3.758, NarTop10Accuracy=0.573, over 5488.67 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,466 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:18:24,100 INFO [trainer.py:765] (5/8) Epoch 5, batch 100, train_loss[loss=3.624, NarTop10Accuracy=0.6064, over 7107.00 frames. ], tot_loss[loss=3.769, NarTop10Accuracy=0.5716, over 2349.34 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (5/8) Epoch 5, batch 200, train_loss[loss=4.121, NarTop10Accuracy=0.4924, over 6780.00 frames. ], tot_loss[loss=3.757, NarTop10Accuracy=0.5743, over 3864.84 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,887 INFO [trainer.py:765] (5/8) Epoch 5, batch 300, train_loss[loss=3.98, NarTop10Accuracy=0.5206, over 6894.00 frames. ], tot_loss[loss=3.73, NarTop10Accuracy=0.5796, over 4659.97 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (5/8) Epoch 5, batch 400, train_loss[loss=3.453, NarTop10Accuracy=0.6275, over 5121.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.581, over 5119.87 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (5/8) Epoch 5, batch 500, train_loss[loss=3.968, NarTop10Accuracy=0.5287, over 6069.00 frames. ], tot_loss[loss=3.736, NarTop10Accuracy=0.5772, over 5391.24 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,710 INFO [trainer.py:765] (5/8) Epoch 5, batch 600, train_loss[loss=3.785, NarTop10Accuracy=0.5587, over 5709.00 frames. ], tot_loss[loss=3.725, NarTop10Accuracy=0.58, over 5642.51 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (5/8) Epoch 5, batch 700, train_loss[loss=3.537, NarTop10Accuracy=0.6233, over 5133.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5805, over 5721.36 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,498 INFO [trainer.py:765] (5/8) Epoch 5, batch 800, train_loss[loss=3.986, NarTop10Accuracy=0.5184, over 5025.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5827, over 5771.19 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (5/8) Epoch 5, batch 900, train_loss[loss=3.639, NarTop10Accuracy=0.6016, over 6249.00 frames. ], tot_loss[loss=3.701, NarTop10Accuracy=0.5847, over 5811.55 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (5/8) Epoch 5, batch 1000, train_loss[loss=3.457, NarTop10Accuracy=0.6513, over 6708.00 frames. ], tot_loss[loss=3.68, NarTop10Accuracy=0.5886, over 5899.38 frames. 
], batch size: 14, lr: 1.60e-02 +2024-08-06 15:24:09,571 INFO [trainer.py:765] (5/8) Epoch 5, batch 1100, train_loss[loss=3.462, NarTop10Accuracy=0.6318, over 6771.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5892, over 5929.17 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,528 INFO [trainer.py:765] (5/8) Epoch 5, batch 1200, train_loss[loss=3.642, NarTop10Accuracy=0.5967, over 7359.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5903, over 5921.24 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,379 INFO [trainer.py:765] (5/8) Epoch 5, batch 1300, train_loss[loss=3.784, NarTop10Accuracy=0.5666, over 5178.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5926, over 5977.83 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (5/8) Epoch 5, batch 1400, train_loss[loss=3.85, NarTop10Accuracy=0.5496, over 6066.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5909, over 6012.52 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (5/8) Epoch 5, batch 1500, train_loss[loss=3.697, NarTop10Accuracy=0.59, over 6012.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5919, over 5935.97 frames. ], batch size: 50, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (5/8) Epoch 5, batch 1600, train_loss[loss=3.5, NarTop10Accuracy=0.6234, over 7125.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5896, over 5921.56 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,603 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (5/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (5/8) Epoch 5, batch 1700, train_loss[loss=3.811, NarTop10Accuracy=0.5511, over 6090.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5917, over 5908.53 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 15:27:55,652 INFO [trainer.py:765] (5/8) Epoch 5, batch 1800, train_loss[loss=3.805, NarTop10Accuracy=0.5675, over 6957.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.593, over 5970.72 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,171 INFO [trainer.py:765] (5/8) Epoch 5, batch 1900, train_loss[loss=3.682, NarTop10Accuracy=0.5963, over 6003.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5921, over 6013.98 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (5/8) Epoch 5, batch 2000, train_loss[loss=3.609, NarTop10Accuracy=0.607, over 6348.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5913, over 6003.27 frames. ], batch size: 52, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (5/8) Epoch 5, batch 2100, train_loss[loss=3.357, NarTop10Accuracy=0.6565, over 4851.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.5889, over 5990.51 frames. ], batch size: 5, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (5/8) Epoch 5, batch 2200, train_loss[loss=4.084, NarTop10Accuracy=0.503, over 7251.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5924, over 6028.91 frames. 
], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,429 INFO [trainer.py:765] (5/8) Epoch 5, batch 2300, train_loss[loss=3.509, NarTop10Accuracy=0.6296, over 5778.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.591, over 6035.96 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (5/8) Epoch 5, batch 2400, train_loss[loss=3.363, NarTop10Accuracy=0.6576, over 5208.00 frames. ], tot_loss[loss=3.645, NarTop10Accuracy=0.5961, over 5790.42 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (5/8) Epoch 5, batch 2500, train_loss[loss=3.487, NarTop10Accuracy=0.6339, over 5088.00 frames. ], tot_loss[loss=3.611, NarTop10Accuracy=0.603, over 5494.91 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,338 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (5/8) Epoch 6, batch 100, train_loss[loss=3.473, NarTop10Accuracy=0.6349, over 7149.00 frames. ], tot_loss[loss=3.63, NarTop10Accuracy=0.5994, over 2359.08 frames. ], batch size: 31, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (5/8) Epoch 6, batch 200, train_loss[loss=3.993, NarTop10Accuracy=0.514, over 6831.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.6021, over 3860.27 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,242 INFO [trainer.py:765] (5/8) Epoch 6, batch 300, train_loss[loss=3.503, NarTop10Accuracy=0.6324, over 7188.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.6036, over 4659.02 frames. ], batch size: 22, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (5/8) Epoch 6, batch 400, train_loss[loss=3.557, NarTop10Accuracy=0.625, over 5124.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.6066, over 5104.39 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (5/8) Epoch 6, batch 500, train_loss[loss=3.408, NarTop10Accuracy=0.6469, over 6006.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6093, over 5372.35 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (5/8) Epoch 6, batch 600, train_loss[loss=3.18, NarTop10Accuracy=0.6918, over 5688.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6089, over 5640.38 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,733 INFO [trainer.py:765] (5/8) Epoch 6, batch 700, train_loss[loss=3.441, NarTop10Accuracy=0.634, over 5265.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.6083, over 5716.38 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (5/8) Epoch 6, batch 800, train_loss[loss=3.755, NarTop10Accuracy=0.5739, over 5010.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.6051, over 5752.94 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (5/8) Epoch 6, batch 900, train_loss[loss=4.006, NarTop10Accuracy=0.5298, over 6177.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.6075, over 5779.07 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (5/8) Epoch 6, batch 1000, train_loss[loss=3.385, NarTop10Accuracy=0.6541, over 6285.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.6046, over 5896.31 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (5/8) Epoch 6, batch 1100, train_loss[loss=3.37, NarTop10Accuracy=0.6543, over 6879.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.6061, over 5932.05 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,827 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (5/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:38:04,966 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (5/8) Epoch 6, batch 1200, train_loss[loss=3.451, NarTop10Accuracy=0.6427, over 7476.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6073, over 5914.76 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (5/8) Epoch 6, batch 1300, train_loss[loss=3.377, NarTop10Accuracy=0.6466, over 4257.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6097, over 5983.45 frames. ], batch size: 5, lr: 1.37e-02 +2024-08-06 15:39:44,070 INFO [trainer.py:765] (5/8) Epoch 6, batch 1400, train_loss[loss=3.435, NarTop10Accuracy=0.6396, over 6135.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6107, over 5994.64 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (5/8) Epoch 6, batch 1500, train_loss[loss=3.937, NarTop10Accuracy=0.5258, over 5580.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6105, over 5949.71 frames. ], batch size: 50, lr: 1.36e-02 +2024-08-06 15:40:43,106 INFO [trainer.py:765] (5/8) Epoch 6, batch 1600, train_loss[loss=3.383, NarTop10Accuracy=0.6469, over 6951.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6112, over 5934.08 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,789 INFO [trainer.py:765] (5/8) Epoch 6, batch 1700, train_loss[loss=3.459, NarTop10Accuracy=0.6275, over 6309.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6136, over 5914.60 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (5/8) Epoch 6, batch 1800, train_loss[loss=3.393, NarTop10Accuracy=0.6516, over 7059.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.611, over 5990.49 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (5/8) Epoch 6, batch 1900, train_loss[loss=3.878, NarTop10Accuracy=0.5405, over 6855.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6074, over 6020.21 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (5/8) Epoch 6, batch 2000, train_loss[loss=3.509, NarTop10Accuracy=0.6333, over 6252.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6096, over 5996.87 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,669 INFO [trainer.py:765] (5/8) Epoch 6, batch 2100, train_loss[loss=3.336, NarTop10Accuracy=0.6616, over 4866.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6114, over 5959.37 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 15:43:18,978 INFO [trainer.py:765] (5/8) Epoch 6, batch 2200, train_loss[loss=3.899, NarTop10Accuracy=0.5471, over 7404.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6116, over 6003.12 frames. ], batch size: 32, lr: 1.33e-02 +2024-08-06 15:43:44,106 INFO [trainer.py:765] (5/8) Epoch 6, batch 2300, train_loss[loss=3.384, NarTop10Accuracy=0.652, over 5697.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6117, over 6022.51 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (5/8) Epoch 6, batch 2400, train_loss[loss=3.178, NarTop10Accuracy=0.6953, over 5034.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6173, over 5786.51 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (5/8) Epoch 6, batch 2500, train_loss[loss=3.436, NarTop10Accuracy=0.6475, over 5130.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6203, over 5488.28 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,606 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:45:58,043 INFO [trainer.py:765] (5/8) Epoch 7, batch 100, train_loss[loss=3.312, NarTop10Accuracy=0.6631, over 7110.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6188, over 2364.60 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (5/8) Epoch 7, batch 200, train_loss[loss=3.553, NarTop10Accuracy=0.6255, over 6876.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6208, over 3852.88 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (5/8) Epoch 7, batch 300, train_loss[loss=3.682, NarTop10Accuracy=0.5892, over 6951.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6181, over 4660.02 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,495 INFO [trainer.py:765] (5/8) Epoch 7, batch 400, train_loss[loss=3.517, NarTop10Accuracy=0.6168, over 5133.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6194, over 5112.31 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,730 INFO [trainer.py:765] (5/8) Epoch 7, batch 500, train_loss[loss=3.599, NarTop10Accuracy=0.6006, over 5943.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6213, over 5383.04 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,369 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,721 INFO [trainer.py:765] (5/8) Epoch 7, batch 600, train_loss[loss=3.092, NarTop10Accuracy=0.7073, over 5640.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6211, over 5660.25 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,912 INFO [trainer.py:765] (5/8) Epoch 7, batch 700, train_loss[loss=3.769, NarTop10Accuracy=0.5596, over 5031.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6235, over 5739.39 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,382 INFO [trainer.py:765] (5/8) Epoch 7, batch 800, train_loss[loss=3.288, NarTop10Accuracy=0.671, over 4320.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6264, over 5787.91 frames. ], batch size: 5, lr: 1.21e-02 +2024-08-06 15:50:34,549 INFO [trainer.py:765] (5/8) Epoch 7, batch 900, train_loss[loss=3.309, NarTop10Accuracy=0.6654, over 6273.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6282, over 5810.56 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,156 INFO [trainer.py:765] (5/8) Epoch 7, batch 1000, train_loss[loss=3.275, NarTop10Accuracy=0.6699, over 6708.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6281, over 5914.66 frames. 
], batch size: 14, lr: 1.20e-02 +2024-08-06 15:51:51,759 INFO [trainer.py:765] (5/8) Epoch 7, batch 1100, train_loss[loss=3.276, NarTop10Accuracy=0.6718, over 6801.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6268, over 5938.25 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,700 INFO [trainer.py:765] (5/8) Epoch 7, batch 1200, train_loss[loss=3.393, NarTop10Accuracy=0.649, over 7239.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6286, over 5908.50 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,008 INFO [trainer.py:765] (5/8) Epoch 7, batch 1300, train_loss[loss=3.592, NarTop10Accuracy=0.5954, over 5154.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6273, over 5987.75 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,843 INFO [trainer.py:765] (5/8) Epoch 7, batch 1400, train_loss[loss=3.229, NarTop10Accuracy=0.6842, over 6237.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6272, over 6023.45 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (5/8) Epoch 7, batch 1500, train_loss[loss=3.779, NarTop10Accuracy=0.5697, over 5847.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.632, over 5963.35 frames. ], batch size: 50, lr: 1.19e-02 +2024-08-06 15:54:32,386 INFO [trainer.py:765] (5/8) Epoch 7, batch 1600, train_loss[loss=3.698, NarTop10Accuracy=0.5777, over 6918.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6308, over 5935.54 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,055 INFO [trainer.py:765] (5/8) Epoch 7, batch 1700, train_loss[loss=3.655, NarTop10Accuracy=0.581, over 6159.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6264, over 5936.50 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 15:55:25,513 INFO [trainer.py:765] (5/8) Epoch 7, batch 1800, train_loss[loss=3.906, NarTop10Accuracy=0.5339, over 6963.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6272, over 5994.28 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,083 INFO [trainer.py:765] (5/8) Epoch 7, batch 1900, train_loss[loss=3.41, NarTop10Accuracy=0.6459, over 6015.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6227, over 6043.53 frames. ], batch size: 51, lr: 1.18e-02 +2024-08-06 15:56:17,592 INFO [trainer.py:765] (5/8) Epoch 7, batch 2000, train_loss[loss=3.78, NarTop10Accuracy=0.5662, over 6264.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6246, over 6003.59 frames. ], batch size: 50, lr: 1.17e-02 +2024-08-06 15:56:42,857 INFO [trainer.py:765] (5/8) Epoch 7, batch 2100, train_loss[loss=3.764, NarTop10Accuracy=0.5606, over 3897.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6279, over 5969.67 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,080 INFO [trainer.py:765] (5/8) Epoch 7, batch 2200, train_loss[loss=3.471, NarTop10Accuracy=0.6327, over 7341.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6233, over 6003.26 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,179 INFO [trainer.py:765] (5/8) Epoch 7, batch 2300, train_loss[loss=3.326, NarTop10Accuracy=0.6561, over 5817.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6227, over 6005.65 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,620 INFO [trainer.py:765] (5/8) Epoch 7, batch 2400, train_loss[loss=3.132, NarTop10Accuracy=0.6951, over 4953.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6265, over 5763.41 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,089 INFO [trainer.py:765] (5/8) Epoch 7, batch 2500, train_loss[loss=3.794, NarTop10Accuracy=0.5609, over 5064.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6314, over 5467.11 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,566 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 15:58:40,221 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:49,032 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:59:52,877 INFO [trainer.py:765] (5/8) Epoch 8, batch 100, train_loss[loss=3.524, NarTop10Accuracy=0.6129, over 7293.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6336, over 2367.73 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (5/8) Epoch 8, batch 200, train_loss[loss=3.206, NarTop10Accuracy=0.684, over 6840.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6315, over 3859.04 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,563 INFO [trainer.py:765] (5/8) Epoch 8, batch 300, train_loss[loss=3.134, NarTop10Accuracy=0.6933, over 7209.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.633, over 4651.66 frames. ], batch size: 23, lr: 1.08e-02 +2024-08-06 16:01:29,760 INFO [trainer.py:765] (5/8) Epoch 8, batch 400, train_loss[loss=3.67, NarTop10Accuracy=0.5903, over 5688.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6315, over 5091.31 frames. ], batch size: 8, lr: 1.08e-02 +2024-08-06 16:02:04,066 INFO [trainer.py:765] (5/8) Epoch 8, batch 500, train_loss[loss=3.902, NarTop10Accuracy=0.541, over 6015.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6347, over 5376.07 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (5/8) Epoch 8, batch 600, train_loss[loss=3.164, NarTop10Accuracy=0.6911, over 5685.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6305, over 5644.43 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,500 INFO [trainer.py:765] (5/8) Epoch 8, batch 700, train_loss[loss=3.735, NarTop10Accuracy=0.581, over 5145.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6286, over 5702.07 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (5/8) Epoch 8, batch 800, train_loss[loss=3.554, NarTop10Accuracy=0.615, over 5067.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6306, over 5752.39 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:04:27,588 INFO [trainer.py:765] (5/8) Epoch 8, batch 900, train_loss[loss=3.332, NarTop10Accuracy=0.6667, over 6126.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6342, over 5766.06 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (5/8) Epoch 8, batch 1000, train_loss[loss=3.56, NarTop10Accuracy=0.6102, over 6168.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6373, over 5867.17 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:05:37,294 INFO [trainer.py:765] (5/8) Epoch 8, batch 1100, train_loss[loss=3.868, NarTop10Accuracy=0.5454, over 6909.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6392, over 5911.41 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,860 INFO [trainer.py:765] (5/8) Epoch 8, batch 1200, train_loss[loss=3.422, NarTop10Accuracy=0.6451, over 7371.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6366, over 5920.40 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (5/8) Epoch 8, batch 1300, train_loss[loss=3.257, NarTop10Accuracy=0.6882, over 4281.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6385, over 5989.19 frames. ], batch size: 5, lr: 1.06e-02 +2024-08-06 16:07:24,235 INFO [trainer.py:765] (5/8) Epoch 8, batch 1400, train_loss[loss=3.413, NarTop10Accuracy=0.6424, over 6105.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6378, over 6010.40 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,169 INFO [trainer.py:765] (5/8) Epoch 8, batch 1500, train_loss[loss=3.326, NarTop10Accuracy=0.6617, over 6684.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6409, over 5958.79 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,949 INFO [trainer.py:765] (5/8) Epoch 8, batch 1600, train_loss[loss=3.233, NarTop10Accuracy=0.6846, over 7344.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6413, over 5932.84 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,618 INFO [trainer.py:765] (5/8) Epoch 8, batch 1700, train_loss[loss=3.382, NarTop10Accuracy=0.6543, over 6597.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6409, over 5922.16 frames. ], batch size: 14, lr: 1.05e-02 +2024-08-06 16:09:13,106 INFO [trainer.py:765] (5/8) Epoch 8, batch 1800, train_loss[loss=3.189, NarTop10Accuracy=0.6977, over 7311.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6427, over 5987.60 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,635 INFO [trainer.py:765] (5/8) Epoch 8, batch 1900, train_loss[loss=3.742, NarTop10Accuracy=0.5792, over 6384.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6444, over 6031.39 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:09:56,939 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (5/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 16:10:05,470 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,203 INFO [trainer.py:765] (5/8) Epoch 8, batch 2000, train_loss[loss=3.925, NarTop10Accuracy=0.527, over 6267.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6432, over 6008.78 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,514 INFO [trainer.py:765] (5/8) Epoch 8, batch 2100, train_loss[loss=3.184, NarTop10Accuracy=0.6827, over 4737.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6443, over 5982.20 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 16:11:03,747 INFO [trainer.py:765] (5/8) Epoch 8, batch 2200, train_loss[loss=3.578, NarTop10Accuracy=0.598, over 7218.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6415, over 6003.72 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,905 INFO [trainer.py:765] (5/8) Epoch 8, batch 2300, train_loss[loss=3.725, NarTop10Accuracy=0.5784, over 5745.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6374, over 6016.20 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,092 INFO [trainer.py:765] (5/8) Epoch 8, batch 2400, train_loss[loss=3.511, NarTop10Accuracy=0.6325, over 5262.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6411, over 5777.59 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,444 INFO [trainer.py:765] (5/8) Epoch 8, batch 2500, train_loss[loss=3.301, NarTop10Accuracy=0.6581, over 5142.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6429, over 5459.45 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,202 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 16:13:37,515 INFO [trainer.py:765] (5/8) Epoch 9, batch 100, train_loss[loss=3.191, NarTop10Accuracy=0.6878, over 7239.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6521, over 2341.76 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,441 INFO [trainer.py:765] (5/8) Epoch 9, batch 200, train_loss[loss=3.562, NarTop10Accuracy=0.6059, over 6756.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.654, over 3839.49 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,508 INFO [trainer.py:765] (5/8) Epoch 9, batch 300, train_loss[loss=3.324, NarTop10Accuracy=0.6662, over 7185.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6507, over 4644.31 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,915 INFO [trainer.py:765] (5/8) Epoch 9, batch 400, train_loss[loss=3.166, NarTop10Accuracy=0.6946, over 5196.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.654, over 5117.32 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,337 INFO [trainer.py:765] (5/8) Epoch 9, batch 500, train_loss[loss=3.142, NarTop10Accuracy=0.6973, over 6090.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6565, over 5384.95 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,973 INFO [trainer.py:765] (5/8) Epoch 9, batch 600, train_loss[loss=3.644, NarTop10Accuracy=0.5976, over 5790.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6582, over 5651.41 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,146 INFO [trainer.py:765] (5/8) Epoch 9, batch 700, train_loss[loss=3.229, NarTop10Accuracy=0.6848, over 4275.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.656, over 5730.21 frames. ], batch size: 5, lr: 9.59e-03 +2024-08-06 16:17:32,053 INFO [trainer.py:765] (5/8) Epoch 9, batch 800, train_loss[loss=3.17, NarTop10Accuracy=0.6982, over 4395.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6493, over 5784.44 frames. ], batch size: 5, lr: 9.57e-03 +2024-08-06 16:18:07,816 INFO [trainer.py:765] (5/8) Epoch 9, batch 900, train_loss[loss=3.157, NarTop10Accuracy=0.7065, over 6189.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6492, over 5799.13 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,346 INFO [trainer.py:765] (5/8) Epoch 9, batch 1000, train_loss[loss=3.243, NarTop10Accuracy=0.6657, over 6279.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6475, over 5897.92 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,383 INFO [trainer.py:765] (5/8) Epoch 9, batch 1100, train_loss[loss=3.369, NarTop10Accuracy=0.6521, over 6891.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6472, over 5937.90 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,879 INFO [trainer.py:765] (5/8) Epoch 9, batch 1200, train_loss[loss=3.714, NarTop10Accuracy=0.5793, over 7185.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6469, over 5940.87 frames. 
], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,907 INFO [trainer.py:765] (5/8) Epoch 9, batch 1300, train_loss[loss=3.077, NarTop10Accuracy=0.7113, over 5319.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6468, over 6002.25 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,580 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (5/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,691 INFO [trainer.py:765] (5/8) Epoch 9, batch 1400, train_loss[loss=3.679, NarTop10Accuracy=0.5834, over 6030.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6444, over 6026.82 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,896 INFO [trainer.py:765] (5/8) Epoch 9, batch 1500, train_loss[loss=3.37, NarTop10Accuracy=0.6541, over 5793.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6501, over 5970.51 frames. ], batch size: 52, lr: 9.42e-03 +2024-08-06 16:22:06,721 INFO [trainer.py:765] (5/8) Epoch 9, batch 1600, train_loss[loss=3.355, NarTop10Accuracy=0.6544, over 7110.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6513, over 5931.30 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (5/8) Epoch 9, batch 1700, train_loss[loss=3.566, NarTop10Accuracy=0.6132, over 6576.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6475, over 5917.02 frames. ], batch size: 14, lr: 9.38e-03 +2024-08-06 16:23:00,063 INFO [trainer.py:765] (5/8) Epoch 9, batch 1800, train_loss[loss=3.287, NarTop10Accuracy=0.6768, over 7053.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.65, over 5982.12 frames. ], batch size: 22, lr: 9.36e-03 +2024-08-06 16:23:26,782 INFO [trainer.py:765] (5/8) Epoch 9, batch 1900, train_loss[loss=3.404, NarTop10Accuracy=0.6572, over 6411.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6476, over 6017.57 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,485 INFO [trainer.py:765] (5/8) Epoch 9, batch 2000, train_loss[loss=3.971, NarTop10Accuracy=0.5217, over 6279.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6495, over 5999.17 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,963 INFO [trainer.py:765] (5/8) Epoch 9, batch 2100, train_loss[loss=3.314, NarTop10Accuracy=0.665, over 4833.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6487, over 5997.16 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (5/8) Epoch 9, batch 2200, train_loss[loss=3.64, NarTop10Accuracy=0.601, over 7098.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6474, over 6027.38 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,721 INFO [trainer.py:765] (5/8) Epoch 9, batch 2300, train_loss[loss=3.255, NarTop10Accuracy=0.6813, over 5658.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6447, over 6030.76 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,163 INFO [trainer.py:765] (5/8) Epoch 9, batch 2400, train_loss[loss=3.316, NarTop10Accuracy=0.6664, over 4974.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6453, over 5784.53 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,768 INFO [trainer.py:765] (5/8) Epoch 9, batch 2500, train_loss[loss=3.278, NarTop10Accuracy=0.664, over 5316.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6518, over 5491.91 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,496 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 16:27:19,583 INFO [trainer.py:765] (5/8) Epoch 10, batch 100, train_loss[loss=3.265, NarTop10Accuracy=0.6721, over 7335.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6512, over 2370.10 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,628 INFO [trainer.py:765] (5/8) Epoch 10, batch 200, train_loss[loss=3.046, NarTop10Accuracy=0.7233, over 7146.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6567, over 3877.84 frames. ], batch size: 18, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (5/8) Epoch 10, batch 300, train_loss[loss=3.162, NarTop10Accuracy=0.699, over 7161.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6555, over 4693.36 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,199 INFO [trainer.py:765] (5/8) Epoch 10, batch 400, train_loss[loss=3.227, NarTop10Accuracy=0.6772, over 5181.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6568, over 5114.18 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,218 INFO [trainer.py:765] (5/8) Epoch 10, batch 500, train_loss[loss=3.055, NarTop10Accuracy=0.7179, over 6009.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6579, over 5391.24 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (5/8) Epoch 10, batch 600, train_loss[loss=3.525, NarTop10Accuracy=0.6111, over 5748.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6558, over 5656.39 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,264 INFO [trainer.py:765] (5/8) Epoch 10, batch 700, train_loss[loss=3.376, NarTop10Accuracy=0.6509, over 5151.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6548, over 5720.88 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,842 INFO [trainer.py:765] (5/8) Epoch 10, batch 800, train_loss[loss=3.441, NarTop10Accuracy=0.6308, over 5145.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6537, over 5790.81 frames. ], batch size: 6, lr: 8.64e-03 +2024-08-06 16:31:16,257 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (5/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,567 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (5/8) Epoch 10, batch 900, train_loss[loss=3.132, NarTop10Accuracy=0.6967, over 6744.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6588, over 5810.47 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (5/8) Epoch 10, batch 1000, train_loss[loss=3.092, NarTop10Accuracy=0.7111, over 6189.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6579, over 5906.91 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (5/8) Epoch 10, batch 1100, train_loss[loss=3.016, NarTop10Accuracy=0.7252, over 7038.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6571, over 5941.33 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (5/8) Epoch 10, batch 1200, train_loss[loss=3.265, NarTop10Accuracy=0.6698, over 7317.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6585, over 5937.29 frames. ], batch size: 31, lr: 8.57e-03 +2024-08-06 16:34:16,170 INFO [trainer.py:765] (5/8) Epoch 10, batch 1300, train_loss[loss=3.289, NarTop10Accuracy=0.6664, over 4947.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6583, over 5997.20 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,200 INFO [trainer.py:765] (5/8) Epoch 10, batch 1400, train_loss[loss=3.285, NarTop10Accuracy=0.6615, over 6048.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6532, over 6016.40 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (5/8) Epoch 10, batch 1500, train_loss[loss=3.64, NarTop10Accuracy=0.5889, over 6132.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6577, over 5965.66 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (5/8) Epoch 10, batch 1600, train_loss[loss=3.591, NarTop10Accuracy=0.6055, over 6987.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6597, over 5954.49 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (5/8) Epoch 10, batch 1700, train_loss[loss=3.369, NarTop10Accuracy=0.6433, over 6516.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6576, over 5947.18 frames. ], batch size: 14, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (5/8) Epoch 10, batch 1800, train_loss[loss=3.105, NarTop10Accuracy=0.7062, over 7146.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6606, over 5997.29 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (5/8) Epoch 10, batch 1900, train_loss[loss=3.226, NarTop10Accuracy=0.6885, over 6348.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6607, over 6030.85 frames. ], batch size: 54, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (5/8) Epoch 10, batch 2000, train_loss[loss=3.192, NarTop10Accuracy=0.6881, over 6033.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6607, over 6003.77 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (5/8) Epoch 10, batch 2100, train_loss[loss=3.326, NarTop10Accuracy=0.654, over 3900.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6585, over 5986.78 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (5/8) Epoch 10, batch 2200, train_loss[loss=3.772, NarTop10Accuracy=0.5693, over 7551.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6579, over 6031.34 frames. ], batch size: 32, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (5/8) Epoch 10, batch 2300, train_loss[loss=2.99, NarTop10Accuracy=0.7258, over 5673.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6568, over 6033.10 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,006 INFO [trainer.py:765] (5/8) Epoch 10, batch 2400, train_loss[loss=3.232, NarTop10Accuracy=0.6785, over 5115.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6617, over 5781.19 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (5/8) Epoch 10, batch 2500, train_loss[loss=3.596, NarTop10Accuracy=0.6048, over 5088.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6664, over 5486.70 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,798 INFO [trainer.py:650] (5/8) Reaches end of dataloader. 
+2024-08-06 16:41:06,234 INFO [trainer.py:765] (5/8) Epoch 11, batch 100, train_loss[loss=3.581, NarTop10Accuracy=0.6104, over 7158.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6563, over 2357.39 frames. ], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,021 INFO [trainer.py:765] (5/8) Epoch 11, batch 200, train_loss[loss=3.739, NarTop10Accuracy=0.5795, over 6750.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6599, over 3863.27 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,190 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,975 INFO [trainer.py:765] (5/8) Epoch 11, batch 300, train_loss[loss=3.054, NarTop10Accuracy=0.7164, over 7341.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6642, over 4659.80 frames. ], batch size: 23, lr: 7.94e-03 +2024-08-06 16:42:55,153 INFO [trainer.py:765] (5/8) Epoch 11, batch 400, train_loss[loss=3.281, NarTop10Accuracy=0.6712, over 5127.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6662, over 5102.15 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,718 INFO [trainer.py:765] (5/8) Epoch 11, batch 500, train_loss[loss=3.057, NarTop10Accuracy=0.7115, over 6135.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6686, over 5364.32 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,242 INFO [trainer.py:765] (5/8) Epoch 11, batch 600, train_loss[loss=3.425, NarTop10Accuracy=0.6264, over 5733.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6657, over 5632.12 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (5/8) Epoch 11, batch 700, train_loss[loss=3.625, NarTop10Accuracy=0.5943, over 4887.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.667, over 5695.69 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,468 INFO [trainer.py:765] (5/8) Epoch 11, batch 800, train_loss[loss=3.055, NarTop10Accuracy=0.7163, over 4407.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6633, over 5750.83 frames. ], batch size: 5, lr: 7.86e-03 +2024-08-06 16:45:46,457 INFO [trainer.py:765] (5/8) Epoch 11, batch 900, train_loss[loss=3.671, NarTop10Accuracy=0.5875, over 6195.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6659, over 5788.01 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,311 INFO [trainer.py:765] (5/8) Epoch 11, batch 1000, train_loss[loss=3.273, NarTop10Accuracy=0.6665, over 6216.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.665, over 5894.65 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 16:46:53,456 INFO [trainer.py:765] (5/8) Epoch 11, batch 1100, train_loss[loss=2.997, NarTop10Accuracy=0.7235, over 6810.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.667, over 5929.68 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (5/8) Epoch 11, batch 1200, train_loss[loss=3.503, NarTop10Accuracy=0.6257, over 7275.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6645, over 5930.75 frames. 
], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,481 INFO [trainer.py:765] (5/8) Epoch 11, batch 1300, train_loss[loss=2.929, NarTop10Accuracy=0.7378, over 5061.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6628, over 5988.60 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (5/8) Epoch 11, batch 1400, train_loss[loss=3.552, NarTop10Accuracy=0.6112, over 6138.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6599, over 6011.25 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,344 INFO [trainer.py:765] (5/8) Epoch 11, batch 1500, train_loss[loss=3.333, NarTop10Accuracy=0.6525, over 6117.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6598, over 5941.29 frames. ], batch size: 54, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (5/8) Epoch 11, batch 1600, train_loss[loss=3.213, NarTop10Accuracy=0.6804, over 7023.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6633, over 5936.34 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,791 INFO [trainer.py:765] (5/8) Epoch 11, batch 1700, train_loss[loss=3.377, NarTop10Accuracy=0.6469, over 6570.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6645, over 5937.01 frames. ], batch size: 14, lr: 7.74e-03 +2024-08-06 16:50:30,353 INFO [trainer.py:765] (5/8) Epoch 11, batch 1800, train_loss[loss=3.516, NarTop10Accuracy=0.6166, over 6939.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6615, over 6004.15 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (5/8) Epoch 11, batch 1900, train_loss[loss=3.804, NarTop10Accuracy=0.5672, over 6165.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6593, over 6037.58 frames. ], batch size: 51, lr: 7.71e-03 +2024-08-06 16:51:22,404 INFO [trainer.py:765] (5/8) Epoch 11, batch 2000, train_loss[loss=3.818, NarTop10Accuracy=0.5528, over 6120.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6604, over 6007.11 frames. ], batch size: 51, lr: 7.70e-03 +2024-08-06 16:51:47,794 INFO [trainer.py:765] (5/8) Epoch 11, batch 2100, train_loss[loss=2.892, NarTop10Accuracy=0.7541, over 4821.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6631, over 5983.55 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (5/8) Epoch 11, batch 2200, train_loss[loss=3.288, NarTop10Accuracy=0.6693, over 7335.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6641, over 6020.04 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,899 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,080 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (5/8) Epoch 11, batch 2300, train_loss[loss=3.383, NarTop10Accuracy=0.6463, over 5802.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6627, over 6022.88 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (5/8) Epoch 11, batch 2400, train_loss[loss=3.377, NarTop10Accuracy=0.6461, over 5118.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6648, over 5771.68 frames. 
], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,372 INFO [trainer.py:765] (5/8) Epoch 11, batch 2500, train_loss[loss=3.716, NarTop10Accuracy=0.5846, over 5289.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6645, over 5482.27 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,102 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 16:54:58,525 INFO [trainer.py:765] (5/8) Epoch 12, batch 100, train_loss[loss=3.692, NarTop10Accuracy=0.5869, over 7077.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.665, over 2346.08 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,432 INFO [trainer.py:765] (5/8) Epoch 12, batch 200, train_loss[loss=3.04, NarTop10Accuracy=0.7236, over 6918.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6713, over 3843.81 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,096 INFO [trainer.py:765] (5/8) Epoch 12, batch 300, train_loss[loss=3.025, NarTop10Accuracy=0.7197, over 7005.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6772, over 4652.89 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,426 INFO [trainer.py:765] (5/8) Epoch 12, batch 400, train_loss[loss=3, NarTop10Accuracy=0.7239, over 5103.00 frames. ], tot_loss[loss=3.253, NarTop10Accuracy=0.6753, over 5108.30 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,503 INFO [trainer.py:765] (5/8) Epoch 12, batch 500, train_loss[loss=3.608, NarTop10Accuracy=0.6055, over 5982.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.672, over 5388.12 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,483 INFO [trainer.py:765] (5/8) Epoch 12, batch 600, train_loss[loss=2.913, NarTop10Accuracy=0.7404, over 5820.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6716, over 5645.21 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,004 INFO [trainer.py:765] (5/8) Epoch 12, batch 700, train_loss[loss=3.465, NarTop10Accuracy=0.6421, over 5010.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6698, over 5712.68 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 16:58:53,469 INFO [trainer.py:765] (5/8) Epoch 12, batch 800, train_loss[loss=3.16, NarTop10Accuracy=0.6809, over 5214.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6683, over 5762.34 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,205 INFO [trainer.py:765] (5/8) Epoch 12, batch 900, train_loss[loss=2.887, NarTop10Accuracy=0.748, over 6291.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6719, over 5782.64 frames. ], batch size: 13, lr: 7.20e-03 +2024-08-06 17:00:01,574 INFO [trainer.py:765] (5/8) Epoch 12, batch 1000, train_loss[loss=2.911, NarTop10Accuracy=0.7446, over 6534.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6692, over 5883.56 frames. ], batch size: 14, lr: 7.19e-03 +2024-08-06 17:00:39,188 INFO [trainer.py:765] (5/8) Epoch 12, batch 1100, train_loss[loss=3.749, NarTop10Accuracy=0.5781, over 6777.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6656, over 5929.52 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,964 INFO [trainer.py:765] (5/8) Epoch 12, batch 1200, train_loss[loss=3.077, NarTop10Accuracy=0.7133, over 7263.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6721, over 5920.74 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,107 INFO [trainer.py:765] (5/8) Epoch 12, batch 1300, train_loss[loss=3.155, NarTop10Accuracy=0.6896, over 4881.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6698, over 5989.30 frames. 
], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,323 INFO [trainer.py:765] (5/8) Epoch 12, batch 1400, train_loss[loss=3.587, NarTop10Accuracy=0.5999, over 6159.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.668, over 5988.69 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (5/8) Epoch 12, batch 1500, train_loss[loss=3.363, NarTop10Accuracy=0.6597, over 5946.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6714, over 5936.17 frames. ], batch size: 50, lr: 7.13e-03 +2024-08-06 17:03:20,691 INFO [trainer.py:765] (5/8) Epoch 12, batch 1600, train_loss[loss=3.198, NarTop10Accuracy=0.6876, over 7080.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6694, over 5926.42 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,296 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (5/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,602 INFO [trainer.py:765] (5/8) Epoch 12, batch 1700, train_loss[loss=3.402, NarTop10Accuracy=0.657, over 6681.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6683, over 5922.08 frames. ], batch size: 14, lr: 7.11e-03 +2024-08-06 17:04:22,120 INFO [trainer.py:765] (5/8) Epoch 12, batch 1800, train_loss[loss=3.604, NarTop10Accuracy=0.6036, over 7464.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6677, over 5986.03 frames. ], batch size: 23, lr: 7.10e-03 +2024-08-06 17:04:48,590 INFO [trainer.py:765] (5/8) Epoch 12, batch 1900, train_loss[loss=3.28, NarTop10Accuracy=0.6743, over 5520.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6688, over 6023.28 frames. ], batch size: 50, lr: 7.08e-03 +2024-08-06 17:05:14,197 INFO [trainer.py:765] (5/8) Epoch 12, batch 2000, train_loss[loss=3.56, NarTop10Accuracy=0.6084, over 5886.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.672, over 5999.20 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 17:05:39,467 INFO [trainer.py:765] (5/8) Epoch 12, batch 2100, train_loss[loss=3.286, NarTop10Accuracy=0.6727, over 3897.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6689, over 5973.80 frames. ], batch size: 4, lr: 7.06e-03 +2024-08-06 17:06:04,690 INFO [trainer.py:765] (5/8) Epoch 12, batch 2200, train_loss[loss=3.503, NarTop10Accuracy=0.6242, over 7311.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6664, over 6014.44 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,846 INFO [trainer.py:765] (5/8) Epoch 12, batch 2300, train_loss[loss=3.498, NarTop10Accuracy=0.6351, over 5760.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6675, over 6020.49 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,199 INFO [trainer.py:765] (5/8) Epoch 12, batch 2400, train_loss[loss=3.122, NarTop10Accuracy=0.6957, over 5190.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6704, over 5769.87 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,645 INFO [trainer.py:765] (5/8) Epoch 12, batch 2500, train_loss[loss=3.192, NarTop10Accuracy=0.6886, over 5898.00 frames. ], tot_loss[loss=3.255, NarTop10Accuracy=0.6744, over 5464.10 frames. ], batch size: 8, lr: 7.02e-03 +2024-08-06 17:07:37,434 INFO [trainer.py:650] (5/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,078 INFO [trainer.py:765] (5/8) Epoch 13, batch 100, train_loss[loss=2.983, NarTop10Accuracy=0.729, over 7239.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6703, over 2377.72 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,119 INFO [trainer.py:765] (5/8) Epoch 13, batch 200, train_loss[loss=2.897, NarTop10Accuracy=0.7428, over 6768.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6703, over 3859.95 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,276 INFO [trainer.py:765] (5/8) Epoch 13, batch 300, train_loss[loss=3.589, NarTop10Accuracy=0.6069, over 7251.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6726, over 4658.90 frames. ], batch size: 23, lr: 6.71e-03 +2024-08-06 17:10:19,163 INFO [trainer.py:765] (5/8) Epoch 13, batch 400, train_loss[loss=2.781, NarTop10Accuracy=0.7675, over 5025.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6756, over 5091.94 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,334 INFO [trainer.py:765] (5/8) Epoch 13, batch 500, train_loss[loss=3.058, NarTop10Accuracy=0.7203, over 6183.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6789, over 5383.27 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,244 INFO [trainer.py:765] (5/8) Epoch 13, batch 600, train_loss[loss=3.046, NarTop10Accuracy=0.718, over 5763.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6798, over 5644.67 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (5/8) Epoch 13, batch 700, train_loss[loss=3.117, NarTop10Accuracy=0.7002, over 5193.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6796, over 5717.75 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,441 INFO [trainer.py:765] (5/8) Epoch 13, batch 800, train_loss[loss=3.048, NarTop10Accuracy=0.7111, over 5142.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6774, over 5789.91 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 17:13:10,031 INFO [trainer.py:765] (5/8) Epoch 13, batch 900, train_loss[loss=3.212, NarTop10Accuracy=0.6854, over 6273.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6789, over 5807.37 frames. ], batch size: 13, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (5/8) Epoch 13, batch 1000, train_loss[loss=3.499, NarTop10Accuracy=0.6188, over 6636.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6773, over 5888.38 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,536 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (5/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:14:24,471 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,697 INFO [trainer.py:765] (5/8) Epoch 13, batch 1100, train_loss[loss=3.637, NarTop10Accuracy=0.6007, over 7035.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6755, over 5927.60 frames. ], batch size: 18, lr: 6.63e-03 +2024-08-06 17:15:03,475 INFO [trainer.py:765] (5/8) Epoch 13, batch 1200, train_loss[loss=3.406, NarTop10Accuracy=0.6414, over 7101.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6746, over 5908.38 frames. 
], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (5/8) Epoch 13, batch 1300, train_loss[loss=3.036, NarTop10Accuracy=0.7268, over 5106.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6745, over 5984.26 frames. ], batch size: 6, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (5/8) Epoch 13, batch 1400, train_loss[loss=3.155, NarTop10Accuracy=0.7044, over 6084.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6734, over 6001.96 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,788 INFO [trainer.py:765] (5/8) Epoch 13, batch 1500, train_loss[loss=3.501, NarTop10Accuracy=0.6265, over 6363.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6732, over 5953.67 frames. ], batch size: 52, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (5/8) Epoch 13, batch 1600, train_loss[loss=2.956, NarTop10Accuracy=0.7404, over 6888.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6725, over 5948.03 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (5/8) Epoch 13, batch 1700, train_loss[loss=3.217, NarTop10Accuracy=0.6824, over 6072.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.673, over 5918.26 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,762 INFO [trainer.py:765] (5/8) Epoch 13, batch 1800, train_loss[loss=3.09, NarTop10Accuracy=0.7055, over 7029.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6738, over 5971.77 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (5/8) Epoch 13, batch 1900, train_loss[loss=3.545, NarTop10Accuracy=0.6203, over 6315.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.6758, over 6025.41 frames. ], batch size: 51, lr: 6.55e-03 +2024-08-06 17:18:52,777 INFO [trainer.py:765] (5/8) Epoch 13, batch 2000, train_loss[loss=3.555, NarTop10Accuracy=0.6155, over 6000.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6785, over 6005.49 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,148 INFO [trainer.py:765] (5/8) Epoch 13, batch 2100, train_loss[loss=2.945, NarTop10Accuracy=0.7444, over 4944.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6785, over 5969.33 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (5/8) Epoch 13, batch 2200, train_loss[loss=3.409, NarTop10Accuracy=0.6374, over 7293.00 frames. ], tot_loss[loss=3.249, NarTop10Accuracy=0.6763, over 5998.74 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,543 INFO [trainer.py:765] (5/8) Epoch 13, batch 2300, train_loss[loss=3.529, NarTop10Accuracy=0.6276, over 5688.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6719, over 6005.59 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (5/8) Epoch 13, batch 2400, train_loss[loss=3.411, NarTop10Accuracy=0.6389, over 5136.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6776, over 5756.71 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (5/8) Epoch 13, batch 2500, train_loss[loss=3.649, NarTop10Accuracy=0.5924, over 5010.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.6798, over 5471.97 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,065 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (5/8) Epoch 14, batch 100, train_loss[loss=3.012, NarTop10Accuracy=0.7184, over 7461.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6821, over 2361.94 frames. 
], batch size: 32, lr: 6.24e-03 +2024-08-06 17:22:50,379 INFO [trainer.py:765] (5/8) Epoch 14, batch 200, train_loss[loss=3.287, NarTop10Accuracy=0.6659, over 6879.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6795, over 3860.09 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,879 INFO [trainer.py:765] (5/8) Epoch 14, batch 300, train_loss[loss=3.135, NarTop10Accuracy=0.7049, over 7221.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6839, over 4654.94 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,484 INFO [trainer.py:765] (5/8) Epoch 14, batch 400, train_loss[loss=3.196, NarTop10Accuracy=0.6939, over 5103.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6806, over 5100.86 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,113 INFO [trainer.py:765] (5/8) Epoch 14, batch 500, train_loss[loss=3.287, NarTop10Accuracy=0.665, over 6033.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6781, over 5373.89 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (5/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,275 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:24:44,822 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,914 INFO [trainer.py:765] (5/8) Epoch 14, batch 600, train_loss[loss=2.897, NarTop10Accuracy=0.7581, over 5718.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6784, over 5647.08 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,547 INFO [trainer.py:765] (5/8) Epoch 14, batch 700, train_loss[loss=3.403, NarTop10Accuracy=0.6476, over 4911.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6798, over 5723.86 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,279 INFO [trainer.py:765] (5/8) Epoch 14, batch 800, train_loss[loss=2.997, NarTop10Accuracy=0.7342, over 4191.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6835, over 5775.66 frames. ], batch size: 5, lr: 6.18e-03 +2024-08-06 17:26:57,658 INFO [trainer.py:765] (5/8) Epoch 14, batch 900, train_loss[loss=3.156, NarTop10Accuracy=0.6872, over 6291.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6847, over 5788.67 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,716 INFO [trainer.py:765] (5/8) Epoch 14, batch 1000, train_loss[loss=3.449, NarTop10Accuracy=0.6271, over 6348.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6818, over 5894.46 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,596 INFO [trainer.py:765] (5/8) Epoch 14, batch 1100, train_loss[loss=3.034, NarTop10Accuracy=0.7222, over 6735.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6818, over 5928.11 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,733 INFO [trainer.py:765] (5/8) Epoch 14, batch 1200, train_loss[loss=3.575, NarTop10Accuracy=0.6112, over 7104.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.682, over 5906.11 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,214 INFO [trainer.py:765] (5/8) Epoch 14, batch 1300, train_loss[loss=3.587, NarTop10Accuracy=0.6, over 5085.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.682, over 5977.89 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,601 INFO [trainer.py:765] (5/8) Epoch 14, batch 1400, train_loss[loss=3.36, NarTop10Accuracy=0.664, over 6084.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6798, over 6005.92 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (5/8) Epoch 14, batch 1500, train_loss[loss=3.713, NarTop10Accuracy=0.5763, over 6243.00 frames. ], tot_loss[loss=3.241, NarTop10Accuracy=0.6781, over 5949.08 frames. ], batch size: 51, lr: 6.12e-03 +2024-08-06 17:30:53,043 INFO [trainer.py:765] (5/8) Epoch 14, batch 1600, train_loss[loss=3.002, NarTop10Accuracy=0.7274, over 7149.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6799, over 5928.71 frames. ], batch size: 22, lr: 6.11e-03 +2024-08-06 17:31:19,728 INFO [trainer.py:765] (5/8) Epoch 14, batch 1700, train_loss[loss=3.066, NarTop10Accuracy=0.7146, over 6171.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6841, over 5916.63 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 17:31:46,289 INFO [trainer.py:765] (5/8) Epoch 14, batch 1800, train_loss[loss=3.083, NarTop10Accuracy=0.711, over 7131.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6886, over 5979.14 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (5/8) Epoch 14, batch 1900, train_loss[loss=3.606, NarTop10Accuracy=0.6078, over 6318.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6848, over 6022.01 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,282 INFO [trainer.py:765] (5/8) Epoch 14, batch 2000, train_loss[loss=3.215, NarTop10Accuracy=0.6886, over 6108.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6822, over 6017.07 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (5/8) Epoch 14, batch 2100, train_loss[loss=3.085, NarTop10Accuracy=0.7154, over 3957.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6813, over 6002.15 frames. ], batch size: 4, lr: 6.07e-03 +2024-08-06 17:33:28,998 INFO [trainer.py:765] (5/8) Epoch 14, batch 2200, train_loss[loss=3.324, NarTop10Accuracy=0.6641, over 7350.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6816, over 6029.72 frames. ], batch size: 31, lr: 6.06e-03 +2024-08-06 17:33:54,087 INFO [trainer.py:765] (5/8) Epoch 14, batch 2300, train_loss[loss=2.899, NarTop10Accuracy=0.7528, over 5733.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6783, over 6025.70 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (5/8) Epoch 14, batch 2400, train_loss[loss=2.895, NarTop10Accuracy=0.7501, over 5097.00 frames. ], tot_loss[loss=3.237, NarTop10Accuracy=0.678, over 5771.52 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (5/8) Epoch 14, batch 2500, train_loss[loss=3.002, NarTop10Accuracy=0.727, over 5070.00 frames. ], tot_loss[loss=3.203, NarTop10Accuracy=0.6844, over 5480.97 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,394 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (5/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,737 INFO [trainer.py:650] (5/8) Reaches end of dataloader. 
+2024-08-06 17:36:11,738 INFO [trainer.py:765] (5/8) Epoch 15, batch 100, train_loss[loss=2.998, NarTop10Accuracy=0.7287, over 7224.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6838, over 2383.46 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (5/8) Epoch 15, batch 200, train_loss[loss=3.417, NarTop10Accuracy=0.64, over 6918.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6883, over 3854.51 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (5/8) Epoch 15, batch 300, train_loss[loss=3.193, NarTop10Accuracy=0.6823, over 7053.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6861, over 4640.03 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,903 INFO [trainer.py:765] (5/8) Epoch 15, batch 400, train_loss[loss=2.901, NarTop10Accuracy=0.7318, over 5121.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6876, over 5096.25 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (5/8) Epoch 15, batch 500, train_loss[loss=2.945, NarTop10Accuracy=0.7474, over 6060.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6881, over 5380.13 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,093 INFO [trainer.py:765] (5/8) Epoch 15, batch 600, train_loss[loss=2.791, NarTop10Accuracy=0.7633, over 5694.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6858, over 5623.56 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,921 INFO [trainer.py:765] (5/8) Epoch 15, batch 700, train_loss[loss=2.836, NarTop10Accuracy=0.7611, over 5079.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6836, over 5724.95 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,564 INFO [trainer.py:765] (5/8) Epoch 15, batch 800, train_loss[loss=3.282, NarTop10Accuracy=0.6629, over 5019.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6783, over 5787.54 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,790 INFO [trainer.py:765] (5/8) Epoch 15, batch 900, train_loss[loss=3.386, NarTop10Accuracy=0.6505, over 6450.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6836, over 5795.74 frames. ], batch size: 14, lr: 5.76e-03 +2024-08-06 17:41:11,250 INFO [trainer.py:765] (5/8) Epoch 15, batch 1000, train_loss[loss=3.281, NarTop10Accuracy=0.6641, over 6606.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6862, over 5915.19 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 17:41:46,451 INFO [trainer.py:765] (5/8) Epoch 15, batch 1100, train_loss[loss=3.198, NarTop10Accuracy=0.6938, over 6729.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6864, over 5955.69 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,455 INFO [trainer.py:765] (5/8) Epoch 15, batch 1200, train_loss[loss=3.253, NarTop10Accuracy=0.6727, over 7116.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6816, over 5943.38 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,427 INFO [trainer.py:765] (5/8) Epoch 15, batch 1300, train_loss[loss=2.939, NarTop10Accuracy=0.7323, over 5085.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6843, over 6007.06 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (5/8) Epoch 15, batch 1400, train_loss[loss=3.432, NarTop10Accuracy=0.6363, over 6159.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6821, over 6011.99 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (5/8) Epoch 15, batch 1500, train_loss[loss=3.186, NarTop10Accuracy=0.7, over 6012.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6818, over 5951.66 frames. ], batch size: 51, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (5/8) Epoch 15, batch 1600, train_loss[loss=3.52, NarTop10Accuracy=0.6172, over 7125.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6859, over 5927.62 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (5/8) Epoch 15, batch 1700, train_loss[loss=3.003, NarTop10Accuracy=0.719, over 6336.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6875, over 5917.21 frames. ], batch size: 13, lr: 5.70e-03 +2024-08-06 17:45:17,293 INFO [trainer.py:765] (5/8) Epoch 15, batch 1800, train_loss[loss=3.23, NarTop10Accuracy=0.6896, over 6867.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6878, over 5970.85 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (5/8) Epoch 15, batch 1900, train_loss[loss=3.198, NarTop10Accuracy=0.6877, over 5865.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6822, over 6012.22 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,540 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:46:01,742 INFO [trainer.py:811] (5/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,371 INFO [trainer.py:765] (5/8) Epoch 15, batch 2000, train_loss[loss=3.222, NarTop10Accuracy=0.685, over 6678.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6839, over 5982.63 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (5/8) Epoch 15, batch 2100, train_loss[loss=3.167, NarTop10Accuracy=0.6941, over 4959.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6857, over 5985.38 frames. ], batch size: 5, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (5/8) Epoch 15, batch 2200, train_loss[loss=3.052, NarTop10Accuracy=0.7225, over 7215.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.684, over 6016.28 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (5/8) Epoch 15, batch 2300, train_loss[loss=3.636, NarTop10Accuracy=0.5961, over 5769.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6833, over 6025.19 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (5/8) Epoch 15, batch 2400, train_loss[loss=3.276, NarTop10Accuracy=0.6705, over 5208.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6881, over 5775.83 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,161 INFO [trainer.py:765] (5/8) Epoch 15, batch 2500, train_loss[loss=2.807, NarTop10Accuracy=0.7649, over 5118.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6932, over 5468.24 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:40,519 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 17:49:41,221 INFO [trainer.py:765] (5/8) Epoch 16, batch 100, train_loss[loss=3.566, NarTop10Accuracy=0.6104, over 7218.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6968, over 2355.67 frames. 
], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,157 INFO [trainer.py:765] (5/8) Epoch 16, batch 200, train_loss[loss=2.881, NarTop10Accuracy=0.7536, over 6897.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6868, over 3850.36 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,159 INFO [trainer.py:765] (5/8) Epoch 16, batch 300, train_loss[loss=3.24, NarTop10Accuracy=0.6768, over 7014.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6886, over 4669.84 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,976 INFO [trainer.py:765] (5/8) Epoch 16, batch 400, train_loss[loss=3.517, NarTop10Accuracy=0.6262, over 5088.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6881, over 5109.76 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,323 INFO [trainer.py:765] (5/8) Epoch 16, batch 500, train_loss[loss=3.001, NarTop10Accuracy=0.727, over 6123.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6898, over 5386.13 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,251 INFO [trainer.py:765] (5/8) Epoch 16, batch 600, train_loss[loss=2.972, NarTop10Accuracy=0.7446, over 5649.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6877, over 5653.04 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,387 INFO [trainer.py:765] (5/8) Epoch 16, batch 700, train_loss[loss=2.915, NarTop10Accuracy=0.7454, over 4995.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6886, over 5723.51 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,815 INFO [trainer.py:765] (5/8) Epoch 16, batch 800, train_loss[loss=3.23, NarTop10Accuracy=0.6758, over 5055.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6896, over 5772.34 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,923 INFO [trainer.py:765] (5/8) Epoch 16, batch 900, train_loss[loss=3.428, NarTop10Accuracy=0.643, over 6159.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6928, over 5804.79 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,607 INFO [trainer.py:765] (5/8) Epoch 16, batch 1000, train_loss[loss=2.915, NarTop10Accuracy=0.7473, over 6201.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 5907.87 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:55:17,197 INFO [trainer.py:765] (5/8) Epoch 16, batch 1100, train_loss[loss=3.11, NarTop10Accuracy=0.7066, over 6801.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6876, over 5942.01 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,209 INFO [trainer.py:765] (5/8) Epoch 16, batch 1200, train_loss[loss=3.387, NarTop10Accuracy=0.6525, over 7089.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6875, over 5931.35 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,775 INFO [trainer.py:765] (5/8) Epoch 16, batch 1300, train_loss[loss=3.444, NarTop10Accuracy=0.6284, over 4953.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6879, over 5998.17 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,648 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (5/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,170 INFO [trainer.py:765] (5/8) Epoch 16, batch 1400, train_loss[loss=3.227, NarTop10Accuracy=0.6894, over 6108.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6883, over 6035.83 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,032 INFO [trainer.py:765] (5/8) Epoch 16, batch 1500, train_loss[loss=3.298, NarTop10Accuracy=0.671, over 5892.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6895, over 5967.43 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 17:58:01,773 INFO [trainer.py:765] (5/8) Epoch 16, batch 1600, train_loss[loss=3.005, NarTop10Accuracy=0.7317, over 7002.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 5949.91 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,474 INFO [trainer.py:765] (5/8) Epoch 16, batch 1700, train_loss[loss=2.957, NarTop10Accuracy=0.7327, over 6174.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6878, over 5944.87 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 17:58:54,975 INFO [trainer.py:765] (5/8) Epoch 16, batch 1800, train_loss[loss=3.18, NarTop10Accuracy=0.6932, over 7389.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6894, over 5998.83 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (5/8) Epoch 16, batch 1900, train_loss[loss=3.528, NarTop10Accuracy=0.6238, over 6669.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.684, over 6039.97 frames. ], batch size: 50, lr: 5.33e-03 +2024-08-06 17:59:46,856 INFO [trainer.py:765] (5/8) Epoch 16, batch 2000, train_loss[loss=3.155, NarTop10Accuracy=0.7029, over 6087.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6911, over 6013.82 frames. ], batch size: 52, lr: 5.32e-03 +2024-08-06 18:00:12,116 INFO [trainer.py:765] (5/8) Epoch 16, batch 2100, train_loss[loss=3.487, NarTop10Accuracy=0.6218, over 4821.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6863, over 5989.94 frames. ], batch size: 5, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (5/8) Epoch 16, batch 2200, train_loss[loss=3.261, NarTop10Accuracy=0.6718, over 7164.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6841, over 6040.88 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (5/8) Epoch 16, batch 2300, train_loss[loss=3.052, NarTop10Accuracy=0.7263, over 5640.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.6837, over 6023.99 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (5/8) Epoch 16, batch 2400, train_loss[loss=3.033, NarTop10Accuracy=0.7156, over 5031.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6871, over 5788.79 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,405 INFO [trainer.py:765] (5/8) Epoch 16, batch 2500, train_loss[loss=3.003, NarTop10Accuracy=0.7268, over 5034.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6939, over 5488.96 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,712 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (5/8) Epoch 17, batch 100, train_loss[loss=3.205, NarTop10Accuracy=0.6938, over 7422.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6992, over 2371.02 frames. 
], batch size: 32, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (5/8) Epoch 17, batch 200, train_loss[loss=3.379, NarTop10Accuracy=0.6485, over 6846.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6958, over 3863.13 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,590 INFO [trainer.py:765] (5/8) Epoch 17, batch 300, train_loss[loss=3.251, NarTop10Accuracy=0.6739, over 7197.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 4653.65 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (5/8) Epoch 17, batch 400, train_loss[loss=3.386, NarTop10Accuracy=0.6426, over 5190.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 5086.20 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (5/8) Epoch 17, batch 500, train_loss[loss=2.83, NarTop10Accuracy=0.7614, over 6201.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6957, over 5381.31 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (5/8) Epoch 17, batch 600, train_loss[loss=3.21, NarTop10Accuracy=0.6922, over 5739.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6923, over 5646.11 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (5/8) Epoch 17, batch 700, train_loss[loss=3.021, NarTop10Accuracy=0.7209, over 4947.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.693, over 5728.94 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,724 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (5/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,763 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,353 INFO [trainer.py:765] (5/8) Epoch 17, batch 800, train_loss[loss=3.085, NarTop10Accuracy=0.7059, over 4182.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6896, over 5774.46 frames. ], batch size: 5, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (5/8) Epoch 17, batch 900, train_loss[loss=3.572, NarTop10Accuracy=0.6266, over 6192.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6949, over 5786.31 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:08:21,597 INFO [trainer.py:765] (5/8) Epoch 17, batch 1000, train_loss[loss=3.255, NarTop10Accuracy=0.6767, over 6225.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6926, over 5889.42 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (5/8) Epoch 17, batch 1100, train_loss[loss=2.979, NarTop10Accuracy=0.7327, over 6816.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6915, over 5918.83 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,745 INFO [trainer.py:765] (5/8) Epoch 17, batch 1200, train_loss[loss=3.005, NarTop10Accuracy=0.7289, over 7128.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6923, over 5917.41 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (5/8) Epoch 17, batch 1300, train_loss[loss=3.283, NarTop10Accuracy=0.6625, over 4923.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6916, over 6005.71 frames. 
], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,026 INFO [trainer.py:765] (5/8) Epoch 17, batch 1400, train_loss[loss=3.249, NarTop10Accuracy=0.6749, over 6249.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6902, over 6018.59 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (5/8) Epoch 17, batch 1500, train_loss[loss=3.529, NarTop10Accuracy=0.6219, over 6357.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6919, over 5966.28 frames. ], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (5/8) Epoch 17, batch 1600, train_loss[loss=3.103, NarTop10Accuracy=0.7064, over 7011.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6952, over 5954.67 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,508 INFO [trainer.py:765] (5/8) Epoch 17, batch 1700, train_loss[loss=3.458, NarTop10Accuracy=0.6229, over 6693.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6908, over 5944.20 frames. ], batch size: 14, lr: 5.03e-03 +2024-08-06 18:12:40,001 INFO [trainer.py:765] (5/8) Epoch 17, batch 1800, train_loss[loss=2.905, NarTop10Accuracy=0.7458, over 7215.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 6003.08 frames. ], batch size: 23, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (5/8) Epoch 17, batch 1900, train_loss[loss=3.172, NarTop10Accuracy=0.6919, over 5604.00 frames. ], tot_loss[loss=3.2, NarTop10Accuracy=0.6856, over 6027.97 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:31,922 INFO [trainer.py:765] (5/8) Epoch 17, batch 2000, train_loss[loss=3.576, NarTop10Accuracy=0.6085, over 6141.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6905, over 6015.42 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (5/8) Epoch 17, batch 2100, train_loss[loss=2.886, NarTop10Accuracy=0.7362, over 3891.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6886, over 5989.62 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,434 INFO [trainer.py:765] (5/8) Epoch 17, batch 2200, train_loss[loss=2.919, NarTop10Accuracy=0.7396, over 6996.00 frames. ], tot_loss[loss=3.202, NarTop10Accuracy=0.6852, over 6016.41 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (5/8) Epoch 17, batch 2300, train_loss[loss=2.936, NarTop10Accuracy=0.7453, over 5796.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6873, over 6041.09 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (5/8) Epoch 17, batch 2400, train_loss[loss=2.845, NarTop10Accuracy=0.7519, over 5106.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.688, over 5779.36 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,514 INFO [trainer.py:765] (5/8) Epoch 17, batch 2500, train_loss[loss=3.009, NarTop10Accuracy=0.7314, over 5145.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6921, over 5483.04 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:55,169 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 18:16:49,908 INFO [trainer.py:765] (5/8) Epoch 18, batch 100, train_loss[loss=3.124, NarTop10Accuracy=0.7041, over 7146.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6911, over 2358.19 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (5/8) Epoch 18, batch 200, train_loss[loss=2.978, NarTop10Accuracy=0.7329, over 6786.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6929, over 3844.79 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,717 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 18:17:35,926 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 18:17:36,529 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (5/8) Epoch 18, batch 300, train_loss[loss=3.404, NarTop10Accuracy=0.6376, over 7191.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6922, over 4654.36 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (5/8) Epoch 18, batch 400, train_loss[loss=3.181, NarTop10Accuracy=0.6779, over 5148.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6957, over 5115.70 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,600 INFO [trainer.py:765] (5/8) Epoch 18, batch 500, train_loss[loss=3.05, NarTop10Accuracy=0.7096, over 6063.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6959, over 5373.31 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (5/8) Epoch 18, batch 600, train_loss[loss=3.384, NarTop10Accuracy=0.6399, over 5763.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.695, over 5645.50 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,870 INFO [trainer.py:765] (5/8) Epoch 18, batch 700, train_loss[loss=3.35, NarTop10Accuracy=0.6514, over 5019.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6935, over 5734.30 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,027 INFO [trainer.py:765] (5/8) Epoch 18, batch 800, train_loss[loss=2.845, NarTop10Accuracy=0.7547, over 5052.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6913, over 5791.95 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,409 INFO [trainer.py:765] (5/8) Epoch 18, batch 900, train_loss[loss=3.081, NarTop10Accuracy=0.7108, over 6627.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6948, over 5799.60 frames. ], batch size: 14, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (5/8) Epoch 18, batch 1000, train_loss[loss=2.841, NarTop10Accuracy=0.7504, over 6264.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 5904.56 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 18:22:46,970 INFO [trainer.py:765] (5/8) Epoch 18, batch 1100, train_loss[loss=3.561, NarTop10Accuracy=0.6142, over 6957.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6933, over 5934.21 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (5/8) Epoch 18, batch 1200, train_loss[loss=3.598, NarTop10Accuracy=0.6108, over 7113.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6905, over 5939.56 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,100 INFO [trainer.py:765] (5/8) Epoch 18, batch 1300, train_loss[loss=2.734, NarTop10Accuracy=0.7793, over 5151.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6943, over 6003.40 frames. ], batch size: 6, lr: 4.77e-03 +2024-08-06 18:24:29,575 INFO [trainer.py:765] (5/8) Epoch 18, batch 1400, train_loss[loss=3.041, NarTop10Accuracy=0.7251, over 6075.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6947, over 6006.24 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (5/8) Epoch 18, batch 1500, train_loss[loss=3.072, NarTop10Accuracy=0.7108, over 6495.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6953, over 5957.13 frames. ], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (5/8) Epoch 18, batch 1600, train_loss[loss=3.078, NarTop10Accuracy=0.7083, over 6927.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6934, over 5933.49 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,688 INFO [trainer.py:765] (5/8) Epoch 18, batch 1700, train_loss[loss=3.138, NarTop10Accuracy=0.7052, over 6633.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6937, over 5927.87 frames. ], batch size: 14, lr: 4.75e-03 +2024-08-06 18:26:21,197 INFO [trainer.py:765] (5/8) Epoch 18, batch 1800, train_loss[loss=3.424, NarTop10Accuracy=0.6375, over 7212.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6955, over 5983.51 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (5/8) Epoch 18, batch 1900, train_loss[loss=3.101, NarTop10Accuracy=0.7078, over 5883.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6923, over 6018.39 frames. ], batch size: 50, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (5/8) Epoch 18, batch 2000, train_loss[loss=3.065, NarTop10Accuracy=0.7232, over 6216.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6947, over 5996.85 frames. ], batch size: 50, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (5/8) Epoch 18, batch 2100, train_loss[loss=3.272, NarTop10Accuracy=0.6699, over 3975.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6955, over 5982.95 frames. ], batch size: 4, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (5/8) Epoch 18, batch 2200, train_loss[loss=3.123, NarTop10Accuracy=0.7066, over 7305.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.694, over 6029.89 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 18:28:06,571 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,096 INFO [trainer.py:765] (5/8) Epoch 18, batch 2300, train_loss[loss=2.824, NarTop10Accuracy=0.7696, over 5631.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6913, over 6030.20 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,593 INFO [trainer.py:765] (5/8) Epoch 18, batch 2400, train_loss[loss=2.963, NarTop10Accuracy=0.7225, over 5124.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6962, over 5782.76 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (5/8) Epoch 18, batch 2500, train_loss[loss=3.133, NarTop10Accuracy=0.704, over 5193.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.699, over 5495.23 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,034 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 18:30:41,231 INFO [trainer.py:765] (5/8) Epoch 19, batch 100, train_loss[loss=2.955, NarTop10Accuracy=0.7307, over 7182.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6929, over 2360.40 frames. 
], batch size: 31, lr: 4.57e-03 +2024-08-06 18:31:15,602 INFO [trainer.py:765] (5/8) Epoch 19, batch 200, train_loss[loss=3.018, NarTop10Accuracy=0.7313, over 6837.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6946, over 3850.22 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (5/8) Epoch 19, batch 300, train_loss[loss=3.531, NarTop10Accuracy=0.6169, over 7275.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6989, over 4657.45 frames. ], batch size: 23, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (5/8) Epoch 19, batch 400, train_loss[loss=3.11, NarTop10Accuracy=0.6924, over 5166.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6985, over 5132.98 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (5/8) Epoch 19, batch 500, train_loss[loss=3.145, NarTop10Accuracy=0.697, over 5937.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.699, over 5411.33 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (5/8) Epoch 19, batch 600, train_loss[loss=3.059, NarTop10Accuracy=0.7173, over 5583.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6979, over 5669.53 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (5/8) Epoch 19, batch 700, train_loss[loss=2.976, NarTop10Accuracy=0.7288, over 5052.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6962, over 5724.52 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (5/8) Epoch 19, batch 800, train_loss[loss=3.14, NarTop10Accuracy=0.6948, over 5184.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6949, over 5780.43 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (5/8) Epoch 19, batch 900, train_loss[loss=2.88, NarTop10Accuracy=0.7471, over 6258.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6974, over 5791.02 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (5/8) Epoch 19, batch 1000, train_loss[loss=3.432, NarTop10Accuracy=0.6362, over 6201.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6965, over 5892.65 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:36:20,938 INFO [trainer.py:765] (5/8) Epoch 19, batch 1100, train_loss[loss=2.952, NarTop10Accuracy=0.7314, over 6942.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6934, over 5922.89 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (5/8) Epoch 19, batch 1200, train_loss[loss=3.036, NarTop10Accuracy=0.7215, over 7008.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6917, over 5928.18 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (5/8) Epoch 19, batch 1300, train_loss[loss=2.852, NarTop10Accuracy=0.7539, over 5097.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6924, over 5999.48 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,679 INFO [trainer.py:765] (5/8) Epoch 19, batch 1400, train_loss[loss=2.869, NarTop10Accuracy=0.7537, over 6219.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6913, over 6026.80 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (5/8) Epoch 19, batch 1500, train_loss[loss=3.495, NarTop10Accuracy=0.6226, over 6057.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6956, over 5952.16 frames. 
], batch size: 52, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (5/8) Epoch 19, batch 1600, train_loss[loss=3.429, NarTop10Accuracy=0.6424, over 7050.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6967, over 5937.68 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,590 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (5/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (5/8) Epoch 19, batch 1700, train_loss[loss=3.454, NarTop10Accuracy=0.6246, over 6204.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6973, over 5923.49 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (5/8) Epoch 19, batch 1800, train_loss[loss=3.56, NarTop10Accuracy=0.6122, over 6921.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.696, over 5982.05 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (5/8) Epoch 19, batch 1900, train_loss[loss=3.093, NarTop10Accuracy=0.7146, over 6171.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6956, over 6003.07 frames. ], batch size: 50, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (5/8) Epoch 19, batch 2000, train_loss[loss=3.336, NarTop10Accuracy=0.6655, over 5964.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6956, over 5999.38 frames. ], batch size: 51, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (5/8) Epoch 19, batch 2100, train_loss[loss=2.775, NarTop10Accuracy=0.7707, over 4875.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6977, over 5964.06 frames. ], batch size: 5, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (5/8) Epoch 19, batch 2200, train_loss[loss=3.185, NarTop10Accuracy=0.6886, over 7344.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6956, over 6005.09 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (5/8) Epoch 19, batch 2300, train_loss[loss=3.158, NarTop10Accuracy=0.691, over 5679.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6922, over 6027.71 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,988 INFO [trainer.py:765] (5/8) Epoch 19, batch 2400, train_loss[loss=3.1, NarTop10Accuracy=0.701, over 5448.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6971, over 5768.71 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (5/8) Epoch 19, batch 2500, train_loss[loss=2.894, NarTop10Accuracy=0.7448, over 5247.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 5466.13 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,838 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 18:44:22,973 INFO [trainer.py:765] (5/8) Epoch 20, batch 100, train_loss[loss=3.355, NarTop10Accuracy=0.6647, over 7068.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6936, over 2358.79 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (5/8) Epoch 20, batch 200, train_loss[loss=3.537, NarTop10Accuracy=0.6191, over 6729.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.7002, over 3863.86 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,279 INFO [trainer.py:765] (5/8) Epoch 20, batch 300, train_loss[loss=3.425, NarTop10Accuracy=0.6362, over 7029.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7031, over 4660.73 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (5/8) Epoch 20, batch 400, train_loss[loss=2.83, NarTop10Accuracy=0.7654, over 5322.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7012, over 5127.74 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,770 INFO [trainer.py:765] (5/8) Epoch 20, batch 500, train_loss[loss=2.836, NarTop10Accuracy=0.7612, over 6114.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6996, over 5393.26 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (5/8) Epoch 20, batch 600, train_loss[loss=3.01, NarTop10Accuracy=0.732, over 5847.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7008, over 5643.64 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (5/8) Epoch 20, batch 700, train_loss[loss=2.729, NarTop10Accuracy=0.7826, over 4251.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7024, over 5710.76 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (5/8) Epoch 20, batch 800, train_loss[loss=2.767, NarTop10Accuracy=0.7743, over 5121.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6997, over 5780.72 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:56,534 INFO [trainer.py:765] (5/8) Epoch 20, batch 900, train_loss[loss=2.836, NarTop10Accuracy=0.7576, over 6618.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7009, over 5785.66 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (5/8) Epoch 20, batch 1000, train_loss[loss=3.309, NarTop10Accuracy=0.6722, over 6162.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6954, over 5899.99 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:52,237 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (5/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,426 INFO [trainer.py:765] (5/8) Epoch 20, batch 1100, train_loss[loss=3.314, NarTop10Accuracy=0.659, over 6843.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6961, over 5925.10 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (5/8) Epoch 20, batch 1200, train_loss[loss=2.9, NarTop10Accuracy=0.7498, over 7431.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6966, over 5930.66 frames. ], batch size: 31, lr: 4.29e-03 +2024-08-06 18:51:25,129 INFO [trainer.py:765] (5/8) Epoch 20, batch 1300, train_loss[loss=3.335, NarTop10Accuracy=0.657, over 4989.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6979, over 5985.72 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,314 INFO [trainer.py:765] (5/8) Epoch 20, batch 1400, train_loss[loss=3.182, NarTop10Accuracy=0.6932, over 5997.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6992, over 6003.57 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,805 INFO [trainer.py:765] (5/8) Epoch 20, batch 1500, train_loss[loss=3.277, NarTop10Accuracy=0.667, over 5994.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6965, over 5958.91 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (5/8) Epoch 20, batch 1600, train_loss[loss=2.922, NarTop10Accuracy=0.7457, over 7446.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6952, over 5944.74 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (5/8) Epoch 20, batch 1700, train_loss[loss=3.394, NarTop10Accuracy=0.6387, over 6675.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5923.27 frames. ], batch size: 14, lr: 4.27e-03 +2024-08-06 18:53:53,850 INFO [trainer.py:765] (5/8) Epoch 20, batch 1800, train_loss[loss=3.02, NarTop10Accuracy=0.7161, over 7203.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.6982, over 5984.69 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (5/8) Epoch 20, batch 1900, train_loss[loss=3.032, NarTop10Accuracy=0.7258, over 6093.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6937, over 6041.15 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (5/8) Epoch 20, batch 2000, train_loss[loss=3.696, NarTop10Accuracy=0.5725, over 6396.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6933, over 6023.23 frames. ], batch size: 52, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (5/8) Epoch 20, batch 2100, train_loss[loss=3.456, NarTop10Accuracy=0.6332, over 3981.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6941, over 5983.54 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (5/8) Epoch 20, batch 2200, train_loss[loss=3.036, NarTop10Accuracy=0.7253, over 7164.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6942, over 6009.81 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,635 INFO [trainer.py:765] (5/8) Epoch 20, batch 2300, train_loss[loss=3.14, NarTop10Accuracy=0.6909, over 5547.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6931, over 6014.19 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,050 INFO [trainer.py:765] (5/8) Epoch 20, batch 2400, train_loss[loss=2.942, NarTop10Accuracy=0.7359, over 5100.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6955, over 5777.06 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (5/8) Epoch 20, batch 2500, train_loss[loss=2.863, NarTop10Accuracy=0.7592, over 5154.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7017, over 5461.79 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,595 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (5/8) Epoch 21, batch 100, train_loss[loss=3.284, NarTop10Accuracy=0.6667, over 7146.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.703, over 2354.06 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,417 INFO [trainer.py:765] (5/8) Epoch 21, batch 200, train_loss[loss=2.837, NarTop10Accuracy=0.7688, over 6774.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7012, over 3848.83 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (5/8) Epoch 21, batch 300, train_loss[loss=2.917, NarTop10Accuracy=0.7383, over 7209.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.699, over 4647.66 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (5/8) Epoch 21, batch 400, train_loss[loss=2.885, NarTop10Accuracy=0.7535, over 5097.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7027, over 5105.75 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,840 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:00:25,076 INFO [trainer.py:811] (5/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,077 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:00:25,623 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,891 INFO [trainer.py:765] (5/8) Epoch 21, batch 500, train_loss[loss=2.877, NarTop10Accuracy=0.7523, over 6123.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7021, over 5375.50 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,329 INFO [trainer.py:765] (5/8) Epoch 21, batch 600, train_loss[loss=3.434, NarTop10Accuracy=0.6383, over 5709.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7059, over 5644.23 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (5/8) Epoch 21, batch 700, train_loss[loss=2.886, NarTop10Accuracy=0.7423, over 4989.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7033, over 5725.41 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (5/8) Epoch 21, batch 800, train_loss[loss=2.942, NarTop10Accuracy=0.7373, over 5109.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7004, over 5770.64 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (5/8) Epoch 21, batch 900, train_loss[loss=3.015, NarTop10Accuracy=0.7155, over 6636.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7008, over 5783.77 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (5/8) Epoch 21, batch 1000, train_loss[loss=2.97, NarTop10Accuracy=0.729, over 6582.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6982, over 5883.74 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:04:07,207 INFO [trainer.py:765] (5/8) Epoch 21, batch 1100, train_loss[loss=3.41, NarTop10Accuracy=0.636, over 6909.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6956, over 5935.71 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,463 INFO [trainer.py:765] (5/8) Epoch 21, batch 1200, train_loss[loss=3.361, NarTop10Accuracy=0.656, over 7338.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6986, over 5940.63 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (5/8) Epoch 21, batch 1300, train_loss[loss=3.037, NarTop10Accuracy=0.7236, over 5040.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7035, over 6004.37 frames. ], batch size: 6, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (5/8) Epoch 21, batch 1400, train_loss[loss=3.384, NarTop10Accuracy=0.6486, over 6579.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 6033.25 frames. ], batch size: 12, lr: 4.07e-03 +2024-08-06 19:06:23,600 INFO [trainer.py:765] (5/8) Epoch 21, batch 1500, train_loss[loss=3.278, NarTop10Accuracy=0.6716, over 6009.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6991, over 5952.18 frames. 
], batch size: 52, lr: 4.07e-03 +2024-08-06 19:06:51,461 INFO [trainer.py:765] (5/8) Epoch 21, batch 1600, train_loss[loss=2.915, NarTop10Accuracy=0.7458, over 7299.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6996, over 5926.42 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,212 INFO [trainer.py:765] (5/8) Epoch 21, batch 1700, train_loss[loss=3.251, NarTop10Accuracy=0.6756, over 6726.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6992, over 5921.71 frames. ], batch size: 14, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (5/8) Epoch 21, batch 1800, train_loss[loss=2.962, NarTop10Accuracy=0.7307, over 7272.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.6976, over 5983.45 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,370 INFO [trainer.py:765] (5/8) Epoch 21, batch 1900, train_loss[loss=3.705, NarTop10Accuracy=0.595, over 6225.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6955, over 6021.34 frames. ], batch size: 51, lr: 4.06e-03 +2024-08-06 19:08:37,106 INFO [trainer.py:765] (5/8) Epoch 21, batch 2000, train_loss[loss=3.56, NarTop10Accuracy=0.6172, over 6360.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.698, over 5997.22 frames. ], batch size: 52, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (5/8) Epoch 21, batch 2100, train_loss[loss=2.82, NarTop10Accuracy=0.7527, over 3909.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5968.95 frames. ], batch size: 4, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (5/8) Epoch 21, batch 2200, train_loss[loss=2.877, NarTop10Accuracy=0.756, over 7590.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6947, over 6024.60 frames. ], batch size: 32, lr: 4.04e-03 +2024-08-06 19:09:53,223 INFO [trainer.py:765] (5/8) Epoch 21, batch 2300, train_loss[loss=2.988, NarTop10Accuracy=0.7301, over 5634.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 6033.24 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,597 INFO [trainer.py:765] (5/8) Epoch 21, batch 2400, train_loss[loss=3.482, NarTop10Accuracy=0.62, over 5172.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.696, over 5785.57 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,229 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (5/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,276 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (5/8) Epoch 21, batch 2500, train_loss[loss=3.212, NarTop10Accuracy=0.6758, over 4968.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7053, over 5483.29 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:08,850 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 19:12:09,054 INFO [trainer.py:765] (5/8) Epoch 22, batch 100, train_loss[loss=2.876, NarTop10Accuracy=0.7431, over 7434.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7082, over 2375.56 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (5/8) Epoch 22, batch 200, train_loss[loss=3.215, NarTop10Accuracy=0.6848, over 6798.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7062, over 3859.90 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (5/8) Epoch 22, batch 300, train_loss[loss=2.843, NarTop10Accuracy=0.754, over 7011.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7065, over 4664.13 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (5/8) Epoch 22, batch 400, train_loss[loss=2.921, NarTop10Accuracy=0.7373, over 5079.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7085, over 5139.88 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (5/8) Epoch 22, batch 500, train_loss[loss=3.174, NarTop10Accuracy=0.6883, over 6171.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7091, over 5418.63 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (5/8) Epoch 22, batch 600, train_loss[loss=2.816, NarTop10Accuracy=0.7588, over 5889.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7026, over 5675.01 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (5/8) Epoch 22, batch 700, train_loss[loss=3.324, NarTop10Accuracy=0.6592, over 5085.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7019, over 5736.75 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:10,664 INFO [trainer.py:765] (5/8) Epoch 22, batch 800, train_loss[loss=3.169, NarTop10Accuracy=0.6874, over 4962.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7027, over 5800.80 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:40,953 INFO [trainer.py:765] (5/8) Epoch 22, batch 900, train_loss[loss=2.874, NarTop10Accuracy=0.7535, over 6276.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7015, over 5815.09 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:16,434 INFO [trainer.py:765] (5/8) Epoch 22, batch 1000, train_loss[loss=3.066, NarTop10Accuracy=0.7066, over 6150.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7034, over 5899.18 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:52,086 INFO [trainer.py:765] (5/8) Epoch 22, batch 1100, train_loss[loss=2.923, NarTop10Accuracy=0.7388, over 6765.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7015, over 5925.76 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,927 INFO [trainer.py:765] (5/8) Epoch 22, batch 1200, train_loss[loss=2.838, NarTop10Accuracy=0.7574, over 7425.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7051, over 5926.21 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (5/8) Epoch 22, batch 1300, train_loss[loss=2.877, NarTop10Accuracy=0.7529, over 5028.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7052, over 5996.57 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (5/8) Epoch 22, batch 1400, train_loss[loss=2.863, NarTop10Accuracy=0.7555, over 6075.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7028, over 6005.71 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (5/8) Epoch 22, batch 1500, train_loss[loss=3.563, NarTop10Accuracy=0.6176, over 5763.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 5928.71 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,647 INFO [trainer.py:765] (5/8) Epoch 22, batch 1600, train_loss[loss=3.176, NarTop10Accuracy=0.6856, over 7194.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.701, over 5922.34 frames. 
], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (5/8) Epoch 22, batch 1700, train_loss[loss=3.203, NarTop10Accuracy=0.68, over 6672.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7013, over 5907.42 frames. ], batch size: 14, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (5/8) Epoch 22, batch 1800, train_loss[loss=3.019, NarTop10Accuracy=0.7239, over 7053.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7024, over 5981.65 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (5/8) Epoch 22, batch 1900, train_loss[loss=3.007, NarTop10Accuracy=0.7229, over 6198.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6976, over 6030.06 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,109 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (5/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,818 INFO [trainer.py:765] (5/8) Epoch 22, batch 2000, train_loss[loss=3.55, NarTop10Accuracy=0.62, over 6117.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7018, over 6020.88 frames. ], batch size: 51, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (5/8) Epoch 22, batch 2100, train_loss[loss=3.148, NarTop10Accuracy=0.6987, over 4869.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 5993.93 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 19:23:15,229 INFO [trainer.py:765] (5/8) Epoch 22, batch 2200, train_loss[loss=3.109, NarTop10Accuracy=0.7067, over 7497.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7039, over 6017.02 frames. ], batch size: 32, lr: 3.86e-03 +2024-08-06 19:23:40,314 INFO [trainer.py:765] (5/8) Epoch 22, batch 2300, train_loss[loss=3.029, NarTop10Accuracy=0.7164, over 5850.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7006, over 6015.44 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (5/8) Epoch 22, batch 2400, train_loss[loss=3.058, NarTop10Accuracy=0.7095, over 5124.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7016, over 5768.61 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (5/8) Epoch 22, batch 2500, train_loss[loss=3.22, NarTop10Accuracy=0.6908, over 5055.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5463.39 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,355 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 19:25:45,383 INFO [trainer.py:765] (5/8) Epoch 23, batch 100, train_loss[loss=2.91, NarTop10Accuracy=0.743, over 7416.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.699, over 2366.31 frames. ], batch size: 32, lr: 3.76e-03 +2024-08-06 19:26:21,307 INFO [trainer.py:765] (5/8) Epoch 23, batch 200, train_loss[loss=3.394, NarTop10Accuracy=0.6394, over 6870.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6996, over 3862.21 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,601 INFO [trainer.py:765] (5/8) Epoch 23, batch 300, train_loss[loss=3.052, NarTop10Accuracy=0.716, over 7023.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.706, over 4657.41 frames. 
], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,539 INFO [trainer.py:765] (5/8) Epoch 23, batch 400, train_loss[loss=3.275, NarTop10Accuracy=0.6659, over 5106.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7043, over 5107.20 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,711 INFO [trainer.py:765] (5/8) Epoch 23, batch 500, train_loss[loss=3.484, NarTop10Accuracy=0.6306, over 6021.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7028, over 5372.95 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,881 INFO [trainer.py:765] (5/8) Epoch 23, batch 600, train_loss[loss=3.298, NarTop10Accuracy=0.6531, over 5703.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7051, over 5636.09 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,365 INFO [trainer.py:765] (5/8) Epoch 23, batch 700, train_loss[loss=3.234, NarTop10Accuracy=0.6746, over 5145.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7071, over 5729.04 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,611 INFO [trainer.py:765] (5/8) Epoch 23, batch 800, train_loss[loss=2.937, NarTop10Accuracy=0.7481, over 5082.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7039, over 5784.25 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:30:19,388 INFO [trainer.py:765] (5/8) Epoch 23, batch 900, train_loss[loss=3.278, NarTop10Accuracy=0.6661, over 6591.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7053, over 5795.87 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 19:30:58,193 INFO [trainer.py:765] (5/8) Epoch 23, batch 1000, train_loss[loss=3.023, NarTop10Accuracy=0.7182, over 6213.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7069, over 5901.02 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,519 INFO [trainer.py:765] (5/8) Epoch 23, batch 1100, train_loss[loss=3.115, NarTop10Accuracy=0.707, over 6816.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7059, over 5918.94 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,516 INFO [trainer.py:765] (5/8) Epoch 23, batch 1200, train_loss[loss=3.022, NarTop10Accuracy=0.729, over 7473.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.704, over 5927.81 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,935 INFO [trainer.py:765] (5/8) Epoch 23, batch 1300, train_loss[loss=3.069, NarTop10Accuracy=0.7049, over 5142.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7033, over 6004.18 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,401 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (5/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,406 INFO [trainer.py:765] (5/8) Epoch 23, batch 1400, train_loss[loss=2.803, NarTop10Accuracy=0.7698, over 5964.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7029, over 6019.55 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,215 INFO [trainer.py:765] (5/8) Epoch 23, batch 1500, train_loss[loss=3.252, NarTop10Accuracy=0.6742, over 6126.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7069, over 5956.34 frames. 
], batch size: 50, lr: 3.71e-03 +2024-08-06 19:34:26,014 INFO [trainer.py:765] (5/8) Epoch 23, batch 1600, train_loss[loss=3.013, NarTop10Accuracy=0.7283, over 6981.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7053, over 5927.72 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,782 INFO [trainer.py:765] (5/8) Epoch 23, batch 1700, train_loss[loss=3.219, NarTop10Accuracy=0.6772, over 6195.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7013, over 5907.18 frames. ], batch size: 13, lr: 3.71e-03 +2024-08-06 19:35:19,261 INFO [trainer.py:765] (5/8) Epoch 23, batch 1800, train_loss[loss=3.008, NarTop10Accuracy=0.7278, over 7041.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.703, over 5975.26 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,595 INFO [trainer.py:765] (5/8) Epoch 23, batch 1900, train_loss[loss=3.432, NarTop10Accuracy=0.645, over 5940.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7006, over 6013.25 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,170 INFO [trainer.py:765] (5/8) Epoch 23, batch 2000, train_loss[loss=3.626, NarTop10Accuracy=0.5982, over 5763.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7034, over 6002.02 frames. ], batch size: 51, lr: 3.70e-03 +2024-08-06 19:36:36,517 INFO [trainer.py:765] (5/8) Epoch 23, batch 2100, train_loss[loss=3.319, NarTop10Accuracy=0.6582, over 4776.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7022, over 5973.59 frames. ], batch size: 5, lr: 3.69e-03 +2024-08-06 19:37:01,907 INFO [trainer.py:765] (5/8) Epoch 23, batch 2200, train_loss[loss=3.05, NarTop10Accuracy=0.7133, over 7392.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6998, over 5999.41 frames. ], batch size: 32, lr: 3.69e-03 +2024-08-06 19:37:27,059 INFO [trainer.py:765] (5/8) Epoch 23, batch 2300, train_loss[loss=2.931, NarTop10Accuracy=0.7394, over 6114.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.702, over 6020.49 frames. ], batch size: 10, lr: 3.69e-03 +2024-08-06 19:37:51,423 INFO [trainer.py:765] (5/8) Epoch 23, batch 2400, train_loss[loss=3.055, NarTop10Accuracy=0.7216, over 5196.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7003, over 5785.41 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,052 INFO [trainer.py:765] (5/8) Epoch 23, batch 2500, train_loss[loss=3.431, NarTop10Accuracy=0.6348, over 5190.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7051, over 5501.62 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:34,638 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 19:39:37,631 INFO [trainer.py:765] (5/8) Epoch 24, batch 100, train_loss[loss=3.449, NarTop10Accuracy=0.6302, over 7500.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7012, over 2358.82 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,189 INFO [trainer.py:765] (5/8) Epoch 24, batch 200, train_loss[loss=2.841, NarTop10Accuracy=0.7663, over 6711.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7047, over 3849.31 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,555 INFO [trainer.py:765] (5/8) Epoch 24, batch 300, train_loss[loss=2.848, NarTop10Accuracy=0.7597, over 7026.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7048, over 4667.62 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,233 INFO [trainer.py:765] (5/8) Epoch 24, batch 400, train_loss[loss=2.941, NarTop10Accuracy=0.7401, over 5763.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7051, over 5112.80 frames. 
], batch size: 8, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (5/8) Epoch 24, batch 500, train_loss[loss=2.959, NarTop10Accuracy=0.7332, over 5979.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7074, over 5391.18 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,451 INFO [trainer.py:765] (5/8) Epoch 24, batch 600, train_loss[loss=2.82, NarTop10Accuracy=0.7717, over 5745.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7071, over 5662.36 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (5/8) Epoch 24, batch 700, train_loss[loss=2.959, NarTop10Accuracy=0.7317, over 5130.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7077, over 5726.05 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,381 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (5/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:43:28,562 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,814 INFO [trainer.py:765] (5/8) Epoch 24, batch 800, train_loss[loss=2.831, NarTop10Accuracy=0.7632, over 5070.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.709, over 5787.51 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,409 INFO [trainer.py:765] (5/8) Epoch 24, batch 900, train_loss[loss=2.908, NarTop10Accuracy=0.7469, over 6315.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.71, over 5830.84 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,489 INFO [trainer.py:765] (5/8) Epoch 24, batch 1000, train_loss[loss=3.23, NarTop10Accuracy=0.676, over 6636.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7066, over 5912.47 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:45:27,107 INFO [trainer.py:765] (5/8) Epoch 24, batch 1100, train_loss[loss=3.463, NarTop10Accuracy=0.6366, over 6813.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7045, over 5952.67 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,437 INFO [trainer.py:765] (5/8) Epoch 24, batch 1200, train_loss[loss=3.061, NarTop10Accuracy=0.7172, over 7290.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7054, over 5929.47 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,294 INFO [trainer.py:765] (5/8) Epoch 24, batch 1300, train_loss[loss=3.413, NarTop10Accuracy=0.631, over 4317.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7061, over 5975.86 frames. ], batch size: 5, lr: 3.56e-03 +2024-08-06 19:47:07,859 INFO [trainer.py:765] (5/8) Epoch 24, batch 1400, train_loss[loss=3.341, NarTop10Accuracy=0.6494, over 6189.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7033, over 6013.28 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,957 INFO [trainer.py:765] (5/8) Epoch 24, batch 1500, train_loss[loss=3.465, NarTop10Accuracy=0.6339, over 6465.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6997, over 5950.30 frames. ], batch size: 51, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (5/8) Epoch 24, batch 1600, train_loss[loss=3.331, NarTop10Accuracy=0.6667, over 7038.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.699, over 5911.12 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,267 INFO [trainer.py:765] (5/8) Epoch 24, batch 1700, train_loss[loss=2.837, NarTop10Accuracy=0.7565, over 6135.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7003, over 5889.01 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 19:49:01,638 INFO [trainer.py:765] (5/8) Epoch 24, batch 1800, train_loss[loss=2.921, NarTop10Accuracy=0.7419, over 6945.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6988, over 5956.75 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,042 INFO [trainer.py:765] (5/8) Epoch 24, batch 1900, train_loss[loss=3.563, NarTop10Accuracy=0.6169, over 6516.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6982, over 6019.60 frames. ], batch size: 50, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (5/8) Epoch 24, batch 2000, train_loss[loss=3.618, NarTop10Accuracy=0.5983, over 5799.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 6001.97 frames. ], batch size: 51, lr: 3.54e-03 +2024-08-06 19:50:18,819 INFO [trainer.py:765] (5/8) Epoch 24, batch 2100, train_loss[loss=2.78, NarTop10Accuracy=0.7648, over 4815.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7032, over 5982.28 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (5/8) Epoch 24, batch 2200, train_loss[loss=3.448, NarTop10Accuracy=0.6389, over 6879.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.703, over 6022.01 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,025 INFO [trainer.py:765] (5/8) Epoch 24, batch 2300, train_loss[loss=3.036, NarTop10Accuracy=0.719, over 5748.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7033, over 6023.81 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,348 INFO [trainer.py:765] (5/8) Epoch 24, batch 2400, train_loss[loss=3.077, NarTop10Accuracy=0.7113, over 5034.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7055, over 5782.52 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (5/8) Epoch 24, batch 2500, train_loss[loss=2.734, NarTop10Accuracy=0.7754, over 5127.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 5479.80 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:17,079 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 19:53:22,197 INFO [trainer.py:765] (5/8) Epoch 25, batch 100, train_loss[loss=3.397, NarTop10Accuracy=0.6354, over 7188.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 2367.83 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,262 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (5/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,330 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,177 INFO [trainer.py:765] (5/8) Epoch 25, batch 200, train_loss[loss=3.024, NarTop10Accuracy=0.7261, over 6966.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7084, over 3855.18 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,647 INFO [trainer.py:765] (5/8) Epoch 25, batch 300, train_loss[loss=3.286, NarTop10Accuracy=0.6656, over 7383.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7087, over 4641.88 frames. 
], batch size: 23, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (5/8) Epoch 25, batch 400, train_loss[loss=3.151, NarTop10Accuracy=0.6999, over 5190.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 5095.67 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,738 INFO [trainer.py:765] (5/8) Epoch 25, batch 500, train_loss[loss=2.875, NarTop10Accuracy=0.755, over 6096.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 5384.09 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,815 INFO [trainer.py:765] (5/8) Epoch 25, batch 600, train_loss[loss=2.861, NarTop10Accuracy=0.7536, over 5649.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 5650.68 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (5/8) Epoch 25, batch 700, train_loss[loss=2.8, NarTop10Accuracy=0.762, over 4167.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7115, over 5709.96 frames. ], batch size: 5, lr: 3.43e-03 +2024-08-06 19:57:30,136 INFO [trainer.py:765] (5/8) Epoch 25, batch 800, train_loss[loss=2.91, NarTop10Accuracy=0.7334, over 4989.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7092, over 5769.51 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,679 INFO [trainer.py:765] (5/8) Epoch 25, batch 900, train_loss[loss=3.076, NarTop10Accuracy=0.7089, over 6564.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7115, over 5805.04 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:58:37,639 INFO [trainer.py:765] (5/8) Epoch 25, batch 1000, train_loss[loss=2.891, NarTop10Accuracy=0.7502, over 6189.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7083, over 5912.62 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:59:14,855 INFO [trainer.py:765] (5/8) Epoch 25, batch 1100, train_loss[loss=3.444, NarTop10Accuracy=0.638, over 6978.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7071, over 5950.72 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,039 INFO [trainer.py:765] (5/8) Epoch 25, batch 1200, train_loss[loss=3.288, NarTop10Accuracy=0.6717, over 7206.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7074, over 5939.55 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (5/8) Epoch 25, batch 1300, train_loss[loss=2.82, NarTop10Accuracy=0.7642, over 5040.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7079, over 6002.56 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,016 INFO [trainer.py:765] (5/8) Epoch 25, batch 1400, train_loss[loss=2.87, NarTop10Accuracy=0.7582, over 6114.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7096, over 6018.45 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (5/8) Epoch 25, batch 1500, train_loss[loss=3.324, NarTop10Accuracy=0.6615, over 5550.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7086, over 5956.85 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,624 INFO [trainer.py:765] (5/8) Epoch 25, batch 1600, train_loss[loss=2.989, NarTop10Accuracy=0.7386, over 7281.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5928.60 frames. ], batch size: 23, lr: 3.41e-03 +2024-08-06 20:02:27,359 INFO [trainer.py:765] (5/8) Epoch 25, batch 1700, train_loss[loss=3.015, NarTop10Accuracy=0.7072, over 6648.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7076, over 5921.54 frames. 
], batch size: 14, lr: 3.41e-03 +2024-08-06 20:02:53,853 INFO [trainer.py:765] (5/8) Epoch 25, batch 1800, train_loss[loss=3.385, NarTop10Accuracy=0.6495, over 7326.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7056, over 5986.35 frames. ], batch size: 23, lr: 3.40e-03 +2024-08-06 20:03:20,340 INFO [trainer.py:765] (5/8) Epoch 25, batch 1900, train_loss[loss=3.262, NarTop10Accuracy=0.677, over 5970.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.704, over 6019.84 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,935 INFO [trainer.py:765] (5/8) Epoch 25, batch 2000, train_loss[loss=3.552, NarTop10Accuracy=0.6082, over 5904.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7004, over 5982.67 frames. ], batch size: 52, lr: 3.40e-03 +2024-08-06 20:04:11,245 INFO [trainer.py:765] (5/8) Epoch 25, batch 2100, train_loss[loss=3.036, NarTop10Accuracy=0.7283, over 4794.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.703, over 5974.78 frames. ], batch size: 5, lr: 3.40e-03 +2024-08-06 20:04:31,409 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (5/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,512 INFO [trainer.py:765] (5/8) Epoch 25, batch 2200, train_loss[loss=3.291, NarTop10Accuracy=0.6661, over 7065.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7024, over 5999.43 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (5/8) Epoch 25, batch 2300, train_loss[loss=3.159, NarTop10Accuracy=0.6882, over 5697.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7024, over 6004.17 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (5/8) Epoch 25, batch 2400, train_loss[loss=2.779, NarTop10Accuracy=0.7653, over 5265.00 frames. ], tot_loss[loss=3.101, NarTop10Accuracy=0.7055, over 5763.67 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,845 INFO [trainer.py:765] (5/8) Epoch 25, batch 2500, train_loss[loss=2.732, NarTop10Accuracy=0.7778, over 5061.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7107, over 5466.07 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:17,603 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 20:07:19,304 INFO [trainer.py:765] (5/8) Epoch 26, batch 100, train_loss[loss=3.105, NarTop10Accuracy=0.7073, over 7434.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7055, over 2363.67 frames. ], batch size: 32, lr: 3.32e-03 +2024-08-06 20:07:52,382 INFO [trainer.py:765] (5/8) Epoch 26, batch 200, train_loss[loss=2.827, NarTop10Accuracy=0.7627, over 6921.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7063, over 3846.93 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,733 INFO [trainer.py:765] (5/8) Epoch 26, batch 300, train_loss[loss=2.977, NarTop10Accuracy=0.7314, over 7074.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 4629.92 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (5/8) Epoch 26, batch 400, train_loss[loss=3.096, NarTop10Accuracy=0.6939, over 5013.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7073, over 5101.09 frames. 
], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,147 INFO [trainer.py:765] (5/8) Epoch 26, batch 500, train_loss[loss=2.835, NarTop10Accuracy=0.7653, over 6063.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7062, over 5377.08 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,890 INFO [trainer.py:765] (5/8) Epoch 26, batch 600, train_loss[loss=2.881, NarTop10Accuracy=0.7489, over 5640.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7117, over 5642.51 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,872 INFO [trainer.py:765] (5/8) Epoch 26, batch 700, train_loss[loss=3.238, NarTop10Accuracy=0.6797, over 4194.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 5717.53 frames. ], batch size: 5, lr: 3.30e-03 +2024-08-06 20:11:19,060 INFO [trainer.py:765] (5/8) Epoch 26, batch 800, train_loss[loss=3.075, NarTop10Accuracy=0.7157, over 5082.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5769.39 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,315 INFO [trainer.py:765] (5/8) Epoch 26, batch 900, train_loss[loss=2.77, NarTop10Accuracy=0.7801, over 6195.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5803.80 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 20:12:25,972 INFO [trainer.py:765] (5/8) Epoch 26, batch 1000, train_loss[loss=2.77, NarTop10Accuracy=0.7771, over 6780.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7069, over 5879.98 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:13:06,376 INFO [trainer.py:765] (5/8) Epoch 26, batch 1100, train_loss[loss=3.442, NarTop10Accuracy=0.6346, over 6768.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7049, over 5921.87 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,535 INFO [trainer.py:765] (5/8) Epoch 26, batch 1200, train_loss[loss=3.384, NarTop10Accuracy=0.6551, over 7425.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5927.27 frames. ], batch size: 32, lr: 3.29e-03 +2024-08-06 20:14:13,695 INFO [trainer.py:765] (5/8) Epoch 26, batch 1300, train_loss[loss=2.954, NarTop10Accuracy=0.7376, over 4284.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7091, over 5980.15 frames. ], batch size: 5, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (5/8) Epoch 26, batch 1400, train_loss[loss=2.86, NarTop10Accuracy=0.7583, over 6114.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 5982.40 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,155 INFO [trainer.py:765] (5/8) Epoch 26, batch 1500, train_loss[loss=3.155, NarTop10Accuracy=0.7004, over 6387.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7074, over 5933.23 frames. ], batch size: 50, lr: 3.28e-03 +2024-08-06 20:15:48,979 INFO [trainer.py:765] (5/8) Epoch 26, batch 1600, train_loss[loss=2.956, NarTop10Accuracy=0.7318, over 7230.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5929.18 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,002 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (5/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,239 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,952 INFO [trainer.py:765] (5/8) Epoch 26, batch 1700, train_loss[loss=3.152, NarTop10Accuracy=0.6914, over 6300.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 5903.85 frames. ], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,426 INFO [trainer.py:765] (5/8) Epoch 26, batch 1800, train_loss[loss=2.739, NarTop10Accuracy=0.7693, over 7176.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7089, over 5971.45 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,840 INFO [trainer.py:765] (5/8) Epoch 26, batch 1900, train_loss[loss=3.045, NarTop10Accuracy=0.7175, over 5481.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7069, over 6014.47 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,379 INFO [trainer.py:765] (5/8) Epoch 26, batch 2000, train_loss[loss=3.713, NarTop10Accuracy=0.5721, over 6243.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7066, over 6002.80 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,563 INFO [trainer.py:765] (5/8) Epoch 26, batch 2100, train_loss[loss=3.037, NarTop10Accuracy=0.7153, over 4827.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7066, over 5957.19 frames. ], batch size: 5, lr: 3.27e-03 +2024-08-06 20:18:32,776 INFO [trainer.py:765] (5/8) Epoch 26, batch 2200, train_loss[loss=2.907, NarTop10Accuracy=0.7408, over 7329.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.707, over 6009.78 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,897 INFO [trainer.py:765] (5/8) Epoch 26, batch 2300, train_loss[loss=3.167, NarTop10Accuracy=0.6934, over 5691.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7057, over 6016.23 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,205 INFO [trainer.py:765] (5/8) Epoch 26, batch 2400, train_loss[loss=2.837, NarTop10Accuracy=0.758, over 5055.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7114, over 5767.10 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,651 INFO [trainer.py:765] (5/8) Epoch 26, batch 2500, train_loss[loss=2.762, NarTop10Accuracy=0.7801, over 5220.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.715, over 5470.58 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:06,065 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 20:21:04,874 INFO [trainer.py:765] (5/8) Epoch 27, batch 100, train_loss[loss=3.278, NarTop10Accuracy=0.6745, over 7128.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7097, over 2354.28 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (5/8) Epoch 27, batch 200, train_loss[loss=2.834, NarTop10Accuracy=0.7727, over 6822.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7095, over 3852.04 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,049 INFO [trainer.py:765] (5/8) Epoch 27, batch 300, train_loss[loss=2.726, NarTop10Accuracy=0.7797, over 6993.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7098, over 4653.19 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (5/8) Epoch 27, batch 400, train_loss[loss=2.856, NarTop10Accuracy=0.7539, over 5082.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7125, over 5100.45 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (5/8) Epoch 27, batch 500, train_loss[loss=2.82, NarTop10Accuracy=0.7676, over 6150.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7158, over 5398.86 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (5/8) Epoch 27, batch 600, train_loss[loss=3.19, NarTop10Accuracy=0.684, over 5829.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7159, over 5657.77 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,975 INFO [trainer.py:765] (5/8) Epoch 27, batch 700, train_loss[loss=2.869, NarTop10Accuracy=0.7471, over 4257.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7164, over 5744.15 frames. ], batch size: 5, lr: 3.18e-03 +2024-08-06 20:25:03,407 INFO [trainer.py:765] (5/8) Epoch 27, batch 800, train_loss[loss=3.265, NarTop10Accuracy=0.6753, over 5049.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7124, over 5801.26 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (5/8) Epoch 27, batch 900, train_loss[loss=3.34, NarTop10Accuracy=0.6593, over 6228.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7129, over 5811.34 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (5/8) Epoch 27, batch 1000, train_loss[loss=2.767, NarTop10Accuracy=0.7678, over 6612.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.712, over 5906.58 frames. ], batch size: 14, lr: 3.17e-03 +2024-08-06 20:26:18,314 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (5/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:26:26,878 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (5/8) Epoch 27, batch 1100, train_loss[loss=3.051, NarTop10Accuracy=0.7097, over 6768.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7111, over 5927.47 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,544 INFO [trainer.py:765] (5/8) Epoch 27, batch 1200, train_loss[loss=2.878, NarTop10Accuracy=0.7593, over 7176.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5919.78 frames. ], batch size: 32, lr: 3.16e-03 +2024-08-06 20:27:58,567 INFO [trainer.py:765] (5/8) Epoch 27, batch 1300, train_loss[loss=2.799, NarTop10Accuracy=0.7624, over 4302.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7125, over 5989.24 frames. ], batch size: 5, lr: 3.16e-03 +2024-08-06 20:28:36,744 INFO [trainer.py:765] (5/8) Epoch 27, batch 1400, train_loss[loss=3.379, NarTop10Accuracy=0.6516, over 6051.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7081, over 5998.10 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (5/8) Epoch 27, batch 1500, train_loss[loss=3.068, NarTop10Accuracy=0.714, over 6081.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7096, over 5942.20 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,361 INFO [trainer.py:765] (5/8) Epoch 27, batch 1600, train_loss[loss=2.888, NarTop10Accuracy=0.7541, over 7278.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7078, over 5908.62 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (5/8) Epoch 27, batch 1700, train_loss[loss=3.225, NarTop10Accuracy=0.6745, over 6279.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7094, over 5885.89 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (5/8) Epoch 27, batch 1800, train_loss[loss=3.516, NarTop10Accuracy=0.6199, over 7173.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7085, over 5974.17 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,844 INFO [trainer.py:765] (5/8) Epoch 27, batch 1900, train_loss[loss=3.049, NarTop10Accuracy=0.7108, over 6582.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7088, over 6008.19 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (5/8) Epoch 27, batch 2000, train_loss[loss=3.158, NarTop10Accuracy=0.6982, over 6312.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 5985.53 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,659 INFO [trainer.py:765] (5/8) Epoch 27, batch 2100, train_loss[loss=2.787, NarTop10Accuracy=0.7592, over 3969.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7104, over 5956.10 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (5/8) Epoch 27, batch 2200, train_loss[loss=3.253, NarTop10Accuracy=0.6776, over 7260.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 6002.06 frames. ], batch size: 32, lr: 3.14e-03 +2024-08-06 20:32:32,941 INFO [trainer.py:765] (5/8) Epoch 27, batch 2300, train_loss[loss=2.965, NarTop10Accuracy=0.7353, over 5640.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7086, over 6008.61 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (5/8) Epoch 27, batch 2400, train_loss[loss=2.844, NarTop10Accuracy=0.7604, over 5073.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 5756.18 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (5/8) Epoch 27, batch 2500, train_loss[loss=3.436, NarTop10Accuracy=0.6338, over 5262.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5468.57 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,589 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 20:34:35,831 INFO [trainer.py:765] (5/8) Epoch 28, batch 100, train_loss[loss=2.918, NarTop10Accuracy=0.7483, over 7158.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7096, over 2354.87 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,393 INFO [trainer.py:765] (5/8) Epoch 28, batch 200, train_loss[loss=2.816, NarTop10Accuracy=0.7555, over 6840.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7091, over 3843.92 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,422 INFO [trainer.py:765] (5/8) Epoch 28, batch 300, train_loss[loss=3.082, NarTop10Accuracy=0.7069, over 7152.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7113, over 4662.60 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,865 INFO [trainer.py:765] (5/8) Epoch 28, batch 400, train_loss[loss=3.17, NarTop10Accuracy=0.6808, over 5115.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7075, over 5120.88 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,406 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (5/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,664 INFO [trainer.py:765] (5/8) Epoch 28, batch 500, train_loss[loss=3.122, NarTop10Accuracy=0.6946, over 6084.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7101, over 5387.56 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,463 INFO [trainer.py:765] (5/8) Epoch 28, batch 600, train_loss[loss=2.989, NarTop10Accuracy=0.73, over 5784.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7104, over 5661.49 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,891 INFO [trainer.py:765] (5/8) Epoch 28, batch 700, train_loss[loss=2.988, NarTop10Accuracy=0.7311, over 5169.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 5730.43 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,489 INFO [trainer.py:765] (5/8) Epoch 28, batch 800, train_loss[loss=2.872, NarTop10Accuracy=0.7543, over 4992.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.714, over 5779.97 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,507 INFO [trainer.py:765] (5/8) Epoch 28, batch 900, train_loss[loss=3.241, NarTop10Accuracy=0.6732, over 6195.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7136, over 5789.44 frames. ], batch size: 13, lr: 3.06e-03 +2024-08-06 20:39:53,240 INFO [trainer.py:765] (5/8) Epoch 28, batch 1000, train_loss[loss=3.306, NarTop10Accuracy=0.6598, over 6165.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7139, over 5901.56 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 20:40:25,867 INFO [trainer.py:765] (5/8) Epoch 28, batch 1100, train_loss[loss=2.818, NarTop10Accuracy=0.7612, over 6834.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 5931.22 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,419 INFO [trainer.py:765] (5/8) Epoch 28, batch 1200, train_loss[loss=3.485, NarTop10Accuracy=0.6275, over 7275.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7084, over 5935.42 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,681 INFO [trainer.py:765] (5/8) Epoch 28, batch 1300, train_loss[loss=3.148, NarTop10Accuracy=0.696, over 5097.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7098, over 5995.96 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 20:42:13,047 INFO [trainer.py:765] (5/8) Epoch 28, batch 1400, train_loss[loss=2.781, NarTop10Accuracy=0.7612, over 6030.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 6020.95 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (5/8) Epoch 28, batch 1500, train_loss[loss=3.556, NarTop10Accuracy=0.6101, over 6615.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7122, over 5944.91 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,081 INFO [trainer.py:765] (5/8) Epoch 28, batch 1600, train_loss[loss=2.924, NarTop10Accuracy=0.7459, over 6906.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5921.61 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:43:37,785 INFO [trainer.py:765] (5/8) Epoch 28, batch 1700, train_loss[loss=3.074, NarTop10Accuracy=0.7129, over 6594.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5916.43 frames. 
], batch size: 14, lr: 3.04e-03 +2024-08-06 20:44:04,326 INFO [trainer.py:765] (5/8) Epoch 28, batch 1800, train_loss[loss=3.078, NarTop10Accuracy=0.7102, over 7215.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7108, over 5983.80 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,757 INFO [trainer.py:765] (5/8) Epoch 28, batch 1900, train_loss[loss=3.085, NarTop10Accuracy=0.7014, over 6780.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7111, over 6019.82 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 20:44:56,328 INFO [trainer.py:765] (5/8) Epoch 28, batch 2000, train_loss[loss=2.994, NarTop10Accuracy=0.729, over 5940.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7143, over 5995.33 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 20:45:21,651 INFO [trainer.py:765] (5/8) Epoch 28, batch 2100, train_loss[loss=3.007, NarTop10Accuracy=0.7154, over 3981.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7145, over 5962.26 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 20:45:47,076 INFO [trainer.py:765] (5/8) Epoch 28, batch 2200, train_loss[loss=3.026, NarTop10Accuracy=0.7318, over 7410.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7122, over 6003.41 frames. ], batch size: 32, lr: 3.03e-03 +2024-08-06 20:46:12,308 INFO [trainer.py:765] (5/8) Epoch 28, batch 2300, train_loss[loss=3.389, NarTop10Accuracy=0.6396, over 5706.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 6011.26 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,807 INFO [trainer.py:765] (5/8) Epoch 28, batch 2400, train_loss[loss=2.922, NarTop10Accuracy=0.7474, over 5169.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7086, over 5799.48 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (5/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (5/8) Epoch 28, batch 2500, train_loss[loss=3.081, NarTop10Accuracy=0.7258, over 5349.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7131, over 5488.33 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,163 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 20:48:21,053 INFO [trainer.py:765] (5/8) Epoch 29, batch 100, train_loss[loss=2.909, NarTop10Accuracy=0.7379, over 7149.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7097, over 2368.64 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,405 INFO [trainer.py:765] (5/8) Epoch 29, batch 200, train_loss[loss=3.39, NarTop10Accuracy=0.6467, over 6660.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7161, over 3856.85 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,476 INFO [trainer.py:765] (5/8) Epoch 29, batch 300, train_loss[loss=3.238, NarTop10Accuracy=0.6749, over 7317.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 4668.97 frames. ], batch size: 23, lr: 2.96e-03 +2024-08-06 20:49:56,052 INFO [trainer.py:765] (5/8) Epoch 29, batch 400, train_loss[loss=3.389, NarTop10Accuracy=0.6525, over 5220.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 5110.89 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,435 INFO [trainer.py:765] (5/8) Epoch 29, batch 500, train_loss[loss=3.184, NarTop10Accuracy=0.6877, over 6084.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7153, over 5394.92 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,023 INFO [trainer.py:765] (5/8) Epoch 29, batch 600, train_loss[loss=2.95, NarTop10Accuracy=0.7416, over 5787.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7151, over 5643.78 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,677 INFO [trainer.py:765] (5/8) Epoch 29, batch 700, train_loss[loss=2.748, NarTop10Accuracy=0.777, over 5058.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7108, over 5740.25 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,724 INFO [trainer.py:765] (5/8) Epoch 29, batch 800, train_loss[loss=2.709, NarTop10Accuracy=0.7831, over 5085.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7121, over 5792.18 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,742 INFO [trainer.py:765] (5/8) Epoch 29, batch 900, train_loss[loss=2.747, NarTop10Accuracy=0.7771, over 6657.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5805.75 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:16,861 INFO [trainer.py:765] (5/8) Epoch 29, batch 1000, train_loss[loss=3.309, NarTop10Accuracy=0.6538, over 6717.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7087, over 5913.58 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:52,902 INFO [trainer.py:765] (5/8) Epoch 29, batch 1100, train_loss[loss=3.226, NarTop10Accuracy=0.6751, over 6837.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 5945.46 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,690 INFO [trainer.py:765] (5/8) Epoch 29, batch 1200, train_loss[loss=3.103, NarTop10Accuracy=0.7066, over 7155.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5923.74 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,428 INFO [trainer.py:765] (5/8) Epoch 29, batch 1300, train_loss[loss=2.806, NarTop10Accuracy=0.7564, over 5127.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7093, over 5978.63 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (5/8) Epoch 29, batch 1400, train_loss[loss=3.477, NarTop10Accuracy=0.6308, over 6072.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7082, over 6005.09 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,359 INFO [trainer.py:765] (5/8) Epoch 29, batch 1500, train_loss[loss=3.369, NarTop10Accuracy=0.6489, over 5988.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5970.45 frames. ], batch size: 52, lr: 2.94e-03 +2024-08-06 20:56:32,041 INFO [trainer.py:765] (5/8) Epoch 29, batch 1600, train_loss[loss=3.321, NarTop10Accuracy=0.6666, over 6966.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7073, over 5944.09 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,639 INFO [trainer.py:765] (5/8) Epoch 29, batch 1700, train_loss[loss=2.885, NarTop10Accuracy=0.7479, over 6654.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7078, over 5922.83 frames. ], batch size: 14, lr: 2.93e-03 +2024-08-06 20:57:25,000 INFO [trainer.py:765] (5/8) Epoch 29, batch 1800, train_loss[loss=3.132, NarTop10Accuracy=0.7001, over 7143.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 5985.98 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,621 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (5/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,757 INFO [trainer.py:765] (5/8) Epoch 29, batch 1900, train_loss[loss=3.036, NarTop10Accuracy=0.7201, over 5727.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7073, over 6026.70 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:25,309 INFO [trainer.py:765] (5/8) Epoch 29, batch 2000, train_loss[loss=3.61, NarTop10Accuracy=0.6043, over 5862.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7076, over 6006.10 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 20:58:50,630 INFO [trainer.py:765] (5/8) Epoch 29, batch 2100, train_loss[loss=2.73, NarTop10Accuracy=0.76, over 3867.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7083, over 5977.52 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 20:59:15,807 INFO [trainer.py:765] (5/8) Epoch 29, batch 2200, train_loss[loss=2.924, NarTop10Accuracy=0.7409, over 7518.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 6005.60 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (5/8) Epoch 29, batch 2300, train_loss[loss=2.906, NarTop10Accuracy=0.7518, over 5781.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7063, over 6012.40 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,156 INFO [trainer.py:765] (5/8) Epoch 29, batch 2400, train_loss[loss=2.857, NarTop10Accuracy=0.7609, over 4956.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7093, over 5765.07 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (5/8) Epoch 29, batch 2500, train_loss[loss=3.341, NarTop10Accuracy=0.649, over 5700.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7141, over 5484.48 frames. ], batch size: 8, lr: 2.92e-03 +2024-08-06 21:00:48,854 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 21:01:41,716 INFO [trainer.py:765] (5/8) Epoch 30, batch 100, train_loss[loss=2.817, NarTop10Accuracy=0.7614, over 7401.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7202, over 2358.96 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,013 INFO [trainer.py:765] (5/8) Epoch 30, batch 200, train_loss[loss=2.869, NarTop10Accuracy=0.7502, over 6783.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.723, over 3849.59 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,343 INFO [trainer.py:765] (5/8) Epoch 30, batch 300, train_loss[loss=2.833, NarTop10Accuracy=0.7473, over 7263.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7246, over 4660.06 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,642 INFO [trainer.py:765] (5/8) Epoch 30, batch 400, train_loss[loss=2.729, NarTop10Accuracy=0.7818, over 5112.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7203, over 5095.64 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,545 INFO [trainer.py:765] (5/8) Epoch 30, batch 500, train_loss[loss=3.26, NarTop10Accuracy=0.6639, over 6120.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7179, over 5385.91 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,655 INFO [trainer.py:765] (5/8) Epoch 30, batch 600, train_loss[loss=2.968, NarTop10Accuracy=0.7181, over 5658.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7171, over 5660.02 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,525 INFO [trainer.py:765] (5/8) Epoch 30, batch 700, train_loss[loss=2.873, NarTop10Accuracy=0.7488, over 5151.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7195, over 5723.77 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,131 INFO [trainer.py:765] (5/8) Epoch 30, batch 800, train_loss[loss=2.983, NarTop10Accuracy=0.7313, over 5094.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7203, over 5771.57 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,843 INFO [trainer.py:765] (5/8) Epoch 30, batch 900, train_loss[loss=2.938, NarTop10Accuracy=0.7407, over 6186.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.719, over 5794.12 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (5/8) Epoch 30, batch 1000, train_loss[loss=2.872, NarTop10Accuracy=0.755, over 6093.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7128, over 5866.00 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:07:25,936 INFO [trainer.py:765] (5/8) Epoch 30, batch 1100, train_loss[loss=3.378, NarTop10Accuracy=0.6411, over 6798.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7104, over 5905.68 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,380 INFO [trainer.py:765] (5/8) Epoch 30, batch 1200, train_loss[loss=3.041, NarTop10Accuracy=0.71, over 7365.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7114, over 5896.99 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,371 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (5/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,202 INFO [trainer.py:765] (5/8) Epoch 30, batch 1300, train_loss[loss=3.144, NarTop10Accuracy=0.6994, over 4926.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7125, over 5975.32 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 21:09:22,396 INFO [trainer.py:765] (5/8) Epoch 30, batch 1400, train_loss[loss=2.845, NarTop10Accuracy=0.7573, over 6096.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7108, over 6005.30 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,372 INFO [trainer.py:765] (5/8) Epoch 30, batch 1500, train_loss[loss=3.045, NarTop10Accuracy=0.722, over 5961.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7125, over 5949.63 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (5/8) Epoch 30, batch 1600, train_loss[loss=2.956, NarTop10Accuracy=0.7274, over 6951.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.712, over 5929.76 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,680 INFO [trainer.py:765] (5/8) Epoch 30, batch 1700, train_loss[loss=3.086, NarTop10Accuracy=0.7011, over 6183.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.71, over 5912.32 frames. 
], batch size: 13, lr: 2.83e-03 +2024-08-06 21:11:13,058 INFO [trainer.py:765] (5/8) Epoch 30, batch 1800, train_loss[loss=3.387, NarTop10Accuracy=0.6406, over 6834.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7102, over 5981.57 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,417 INFO [trainer.py:765] (5/8) Epoch 30, batch 1900, train_loss[loss=3.001, NarTop10Accuracy=0.7314, over 5757.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.709, over 6029.33 frames. ], batch size: 51, lr: 2.83e-03 +2024-08-06 21:12:04,825 INFO [trainer.py:765] (5/8) Epoch 30, batch 2000, train_loss[loss=3.338, NarTop10Accuracy=0.6474, over 5703.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7127, over 6003.86 frames. ], batch size: 51, lr: 2.83e-03 +2024-08-06 21:12:30,087 INFO [trainer.py:765] (5/8) Epoch 30, batch 2100, train_loss[loss=2.817, NarTop10Accuracy=0.7641, over 4770.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5985.10 frames. ], batch size: 5, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (5/8) Epoch 30, batch 2200, train_loss[loss=2.863, NarTop10Accuracy=0.7563, over 7200.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7119, over 6023.75 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,296 INFO [trainer.py:765] (5/8) Epoch 30, batch 2300, train_loss[loss=2.809, NarTop10Accuracy=0.7676, over 5811.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7084, over 6010.19 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,490 INFO [trainer.py:765] (5/8) Epoch 30, batch 2400, train_loss[loss=2.567, NarTop10Accuracy=0.8046, over 5109.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.716, over 5771.15 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,986 INFO [trainer.py:765] (5/8) Epoch 30, batch 2500, train_loss[loss=2.844, NarTop10Accuracy=0.7499, over 5142.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7175, over 5457.33 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,902 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (5/8) Epoch 31, batch 100, train_loss[loss=3.349, NarTop10Accuracy=0.653, over 7323.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7149, over 2362.03 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,127 INFO [trainer.py:765] (5/8) Epoch 31, batch 200, train_loss[loss=2.974, NarTop10Accuracy=0.7365, over 6831.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7188, over 3860.67 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,215 INFO [trainer.py:765] (5/8) Epoch 31, batch 300, train_loss[loss=2.94, NarTop10Accuracy=0.7372, over 7218.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7187, over 4669.14 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (5/8) Epoch 31, batch 400, train_loss[loss=3.002, NarTop10Accuracy=0.7207, over 5148.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7163, over 5108.89 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,724 INFO [trainer.py:765] (5/8) Epoch 31, batch 500, train_loss[loss=2.84, NarTop10Accuracy=0.7568, over 6135.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7169, over 5385.23 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (5/8) Epoch 31, batch 600, train_loss[loss=2.662, NarTop10Accuracy=0.7941, over 5829.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7138, over 5655.44 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,610 INFO [trainer.py:765] (5/8) Epoch 31, batch 700, train_loss[loss=3.401, NarTop10Accuracy=0.6533, over 5004.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7129, over 5721.53 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,095 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (5/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,277 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,245 INFO [trainer.py:765] (5/8) Epoch 31, batch 800, train_loss[loss=2.725, NarTop10Accuracy=0.783, over 5088.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5781.80 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:19:56,950 INFO [trainer.py:765] (5/8) Epoch 31, batch 900, train_loss[loss=3.217, NarTop10Accuracy=0.6801, over 6240.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7157, over 5800.59 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,310 INFO [trainer.py:765] (5/8) Epoch 31, batch 1000, train_loss[loss=3.412, NarTop10Accuracy=0.6299, over 6294.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7173, over 5895.42 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,215 INFO [trainer.py:765] (5/8) Epoch 31, batch 1100, train_loss[loss=3.246, NarTop10Accuracy=0.6697, over 6831.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7157, over 5931.89 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,119 INFO [trainer.py:765] (5/8) Epoch 31, batch 1200, train_loss[loss=2.902, NarTop10Accuracy=0.7419, over 7095.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7184, over 5922.05 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,741 INFO [trainer.py:765] (5/8) Epoch 31, batch 1300, train_loss[loss=2.769, NarTop10Accuracy=0.7689, over 5067.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5993.76 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 21:22:53,533 INFO [trainer.py:765] (5/8) Epoch 31, batch 1400, train_loss[loss=2.894, NarTop10Accuracy=0.7446, over 6207.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7113, over 6026.93 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (5/8) Epoch 31, batch 1500, train_loss[loss=3.361, NarTop10Accuracy=0.6547, over 6129.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 5947.18 frames. ], batch size: 52, lr: 2.74e-03 +2024-08-06 21:23:49,004 INFO [trainer.py:765] (5/8) Epoch 31, batch 1600, train_loss[loss=3.295, NarTop10Accuracy=0.6595, over 7125.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7146, over 5921.79 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,512 INFO [trainer.py:765] (5/8) Epoch 31, batch 1700, train_loss[loss=3.339, NarTop10Accuracy=0.6543, over 6264.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5906.38 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,995 INFO [trainer.py:765] (5/8) Epoch 31, batch 1800, train_loss[loss=2.838, NarTop10Accuracy=0.7647, over 7254.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 5977.31 frames. 
], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,357 INFO [trainer.py:765] (5/8) Epoch 31, batch 1900, train_loss[loss=3.245, NarTop10Accuracy=0.6776, over 6003.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7138, over 6022.64 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:33,773 INFO [trainer.py:765] (5/8) Epoch 31, batch 2000, train_loss[loss=3.017, NarTop10Accuracy=0.7257, over 6033.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7145, over 5996.08 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:25:59,107 INFO [trainer.py:765] (5/8) Epoch 31, batch 2100, train_loss[loss=2.649, NarTop10Accuracy=0.8035, over 3933.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7162, over 5969.82 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (5/8) Epoch 31, batch 2200, train_loss[loss=2.959, NarTop10Accuracy=0.7407, over 7200.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7197, over 6002.24 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (5/8) Epoch 31, batch 2300, train_loss[loss=2.743, NarTop10Accuracy=0.7776, over 5532.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7152, over 6023.28 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,607 INFO [trainer.py:765] (5/8) Epoch 31, batch 2400, train_loss[loss=2.851, NarTop10Accuracy=0.7502, over 5226.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 5784.57 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (5/8) Epoch 31, batch 2500, train_loss[loss=2.952, NarTop10Accuracy=0.7377, over 5040.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7183, over 5479.18 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:57,461 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 21:28:49,392 INFO [trainer.py:765] (5/8) Epoch 32, batch 100, train_loss[loss=2.892, NarTop10Accuracy=0.7463, over 7350.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7136, over 2359.27 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,160 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (5/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,273 INFO [trainer.py:765] (5/8) Epoch 32, batch 200, train_loss[loss=3.317, NarTop10Accuracy=0.6643, over 6831.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.712, over 3856.83 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,279 INFO [trainer.py:765] (5/8) Epoch 32, batch 300, train_loss[loss=2.986, NarTop10Accuracy=0.7268, over 7107.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7151, over 4660.71 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (5/8) Epoch 32, batch 400, train_loss[loss=2.683, NarTop10Accuracy=0.7959, over 5079.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7132, over 5107.55 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,531 INFO [trainer.py:765] (5/8) Epoch 32, batch 500, train_loss[loss=2.92, NarTop10Accuracy=0.7454, over 6069.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7152, over 5382.61 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,487 INFO [trainer.py:765] (5/8) Epoch 32, batch 600, train_loss[loss=3.182, NarTop10Accuracy=0.6845, over 5745.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.716, over 5651.21 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (5/8) Epoch 32, batch 700, train_loss[loss=2.743, NarTop10Accuracy=0.778, over 4251.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7176, over 5709.36 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:00,647 INFO [trainer.py:765] (5/8) Epoch 32, batch 800, train_loss[loss=3.325, NarTop10Accuracy=0.6645, over 4320.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7169, over 5787.08 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:28,992 INFO [trainer.py:765] (5/8) Epoch 32, batch 900, train_loss[loss=2.762, NarTop10Accuracy=0.7744, over 6288.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 5806.66 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,049 INFO [trainer.py:765] (5/8) Epoch 32, batch 1000, train_loss[loss=3.168, NarTop10Accuracy=0.6892, over 6096.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7167, over 5903.61 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:46,675 INFO [trainer.py:765] (5/8) Epoch 32, batch 1100, train_loss[loss=3.144, NarTop10Accuracy=0.6955, over 6831.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7157, over 5935.99 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (5/8) Epoch 32, batch 1200, train_loss[loss=3.181, NarTop10Accuracy=0.6864, over 7338.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 5944.49 frames. ], batch size: 32, lr: 2.66e-03 +2024-08-06 21:35:52,801 INFO [trainer.py:765] (5/8) Epoch 32, batch 1300, train_loss[loss=3.131, NarTop10Accuracy=0.6972, over 5013.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7134, over 6013.02 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,479 INFO [trainer.py:765] (5/8) Epoch 32, batch 1400, train_loss[loss=3.371, NarTop10Accuracy=0.6457, over 5997.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7138, over 6029.48 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,734 INFO [trainer.py:765] (5/8) Epoch 32, batch 1500, train_loss[loss=3.423, NarTop10Accuracy=0.6404, over 6189.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7133, over 5961.21 frames. ], batch size: 50, lr: 2.66e-03 +2024-08-06 21:37:32,522 INFO [trainer.py:765] (5/8) Epoch 32, batch 1600, train_loss[loss=3.092, NarTop10Accuracy=0.7064, over 6915.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7135, over 5944.07 frames. ], batch size: 22, lr: 2.66e-03 +2024-08-06 21:37:59,160 INFO [trainer.py:765] (5/8) Epoch 32, batch 1700, train_loss[loss=3.044, NarTop10Accuracy=0.7164, over 6297.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 5933.97 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 21:38:25,703 INFO [trainer.py:765] (5/8) Epoch 32, batch 1800, train_loss[loss=3.09, NarTop10Accuracy=0.7074, over 7155.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5987.84 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,170 INFO [trainer.py:765] (5/8) Epoch 32, batch 1900, train_loss[loss=3.056, NarTop10Accuracy=0.7155, over 6729.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7103, over 6038.25 frames. 
], batch size: 51, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (5/8) Epoch 32, batch 2000, train_loss[loss=3.474, NarTop10Accuracy=0.6334, over 6258.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5982.98 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (5/8) Epoch 32, batch 2100, train_loss[loss=2.696, NarTop10Accuracy=0.7778, over 4770.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.715, over 5974.50 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 21:39:54,783 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (5/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,629 INFO [trainer.py:765] (5/8) Epoch 32, batch 2200, train_loss[loss=3.1, NarTop10Accuracy=0.7107, over 7248.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 6011.41 frames. ], batch size: 32, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (5/8) Epoch 32, batch 2300, train_loss[loss=3.282, NarTop10Accuracy=0.6657, over 5775.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 6026.38 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (5/8) Epoch 32, batch 2400, train_loss[loss=3.255, NarTop10Accuracy=0.6667, over 5133.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 5774.55 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,538 INFO [trainer.py:765] (5/8) Epoch 32, batch 2500, train_loss[loss=2.774, NarTop10Accuracy=0.7773, over 4968.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7205, over 5462.29 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,517 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 21:42:47,616 INFO [trainer.py:765] (5/8) Epoch 33, batch 100, train_loss[loss=3.075, NarTop10Accuracy=0.705, over 7440.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7231, over 2365.65 frames. ], batch size: 32, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (5/8) Epoch 33, batch 200, train_loss[loss=2.764, NarTop10Accuracy=0.778, over 6906.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 3860.29 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (5/8) Epoch 33, batch 300, train_loss[loss=3.351, NarTop10Accuracy=0.654, over 7170.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7169, over 4652.80 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (5/8) Epoch 33, batch 400, train_loss[loss=2.828, NarTop10Accuracy=0.7578, over 5055.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7167, over 5110.88 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (5/8) Epoch 33, batch 500, train_loss[loss=2.715, NarTop10Accuracy=0.7818, over 6090.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7205, over 5378.54 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,227 INFO [trainer.py:765] (5/8) Epoch 33, batch 600, train_loss[loss=3.434, NarTop10Accuracy=0.6351, over 5775.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7148, over 5650.07 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,317 INFO [trainer.py:765] (5/8) Epoch 33, batch 700, train_loss[loss=2.75, NarTop10Accuracy=0.7826, over 5076.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7144, over 5724.06 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (5/8) Epoch 33, batch 800, train_loss[loss=2.83, NarTop10Accuracy=0.7667, over 4353.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7155, over 5778.71 frames. ], batch size: 5, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (5/8) Epoch 33, batch 900, train_loss[loss=3.177, NarTop10Accuracy=0.6941, over 6657.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7146, over 5799.25 frames. ], batch size: 14, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (5/8) Epoch 33, batch 1000, train_loss[loss=2.909, NarTop10Accuracy=0.7409, over 6708.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7134, over 5900.82 frames. ], batch size: 14, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (5/8) Epoch 33, batch 1100, train_loss[loss=2.967, NarTop10Accuracy=0.7349, over 6855.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7103, over 5934.52 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,660 INFO [trainer.py:765] (5/8) Epoch 33, batch 1200, train_loss[loss=2.855, NarTop10Accuracy=0.7601, over 7437.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7121, over 5927.88 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,816 INFO [trainer.py:765] (5/8) Epoch 33, batch 1300, train_loss[loss=2.975, NarTop10Accuracy=0.7364, over 5205.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7148, over 5996.44 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (5/8) Epoch 33, batch 1400, train_loss[loss=3.432, NarTop10Accuracy=0.6375, over 6120.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7142, over 6001.97 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (5/8) Epoch 33, batch 1500, train_loss[loss=3.023, NarTop10Accuracy=0.7273, over 6066.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7148, over 5954.47 frames. ], batch size: 51, lr: 2.58e-03 +2024-08-06 21:51:04,607 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (5/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 21:51:13,181 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,262 INFO [trainer.py:765] (5/8) Epoch 33, batch 1600, train_loss[loss=3.178, NarTop10Accuracy=0.6843, over 7137.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.716, over 5939.75 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (5/8) Epoch 33, batch 1700, train_loss[loss=2.87, NarTop10Accuracy=0.7533, over 6141.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5915.48 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (5/8) Epoch 33, batch 1800, train_loss[loss=2.761, NarTop10Accuracy=0.7728, over 7275.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5991.45 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (5/8) Epoch 33, batch 1900, train_loss[loss=3.486, NarTop10Accuracy=0.6271, over 6027.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7118, over 6022.38 frames. ], batch size: 51, lr: 2.57e-03 +2024-08-06 21:53:06,353 INFO [trainer.py:765] (5/8) Epoch 33, batch 2000, train_loss[loss=3.491, NarTop10Accuracy=0.6301, over 6225.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7175, over 6016.34 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,659 INFO [trainer.py:765] (5/8) Epoch 33, batch 2100, train_loss[loss=3.331, NarTop10Accuracy=0.6522, over 4797.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5990.52 frames. ], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,891 INFO [trainer.py:765] (5/8) Epoch 33, batch 2200, train_loss[loss=3.462, NarTop10Accuracy=0.6232, over 7350.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7129, over 6008.84 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (5/8) Epoch 33, batch 2300, train_loss[loss=2.784, NarTop10Accuracy=0.7791, over 5628.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7134, over 6025.27 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,430 INFO [trainer.py:765] (5/8) Epoch 33, batch 2400, train_loss[loss=2.801, NarTop10Accuracy=0.7733, over 5040.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 5799.43 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (5/8) Epoch 33, batch 2500, train_loss[loss=2.723, NarTop10Accuracy=0.7789, over 5058.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 5487.41 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,874 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 21:56:24,722 INFO [trainer.py:765] (5/8) Epoch 34, batch 100, train_loss[loss=3.48, NarTop10Accuracy=0.6308, over 7377.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7163, over 2352.11 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,614 INFO [trainer.py:765] (5/8) Epoch 34, batch 200, train_loss[loss=3.24, NarTop10Accuracy=0.6773, over 6930.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7225, over 3843.06 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,777 INFO [trainer.py:765] (5/8) Epoch 34, batch 300, train_loss[loss=2.846, NarTop10Accuracy=0.7619, over 7353.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7211, over 4654.84 frames. ], batch size: 23, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (5/8) Epoch 34, batch 400, train_loss[loss=3.094, NarTop10Accuracy=0.7051, over 5244.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7231, over 5098.16 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,690 INFO [trainer.py:765] (5/8) Epoch 34, batch 500, train_loss[loss=3.316, NarTop10Accuracy=0.6591, over 6054.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 5383.76 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (5/8) Epoch 34, batch 600, train_loss[loss=2.739, NarTop10Accuracy=0.7683, over 5754.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7192, over 5655.64 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,057 INFO [trainer.py:765] (5/8) Epoch 34, batch 700, train_loss[loss=2.856, NarTop10Accuracy=0.7394, over 5019.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 5724.76 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (5/8) Epoch 34, batch 800, train_loss[loss=2.869, NarTop10Accuracy=0.7538, over 5133.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7196, over 5773.41 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (5/8) Epoch 34, batch 900, train_loss[loss=2.873, NarTop10Accuracy=0.7495, over 6300.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7197, over 5786.23 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:01:25,339 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (5/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:01:34,091 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (5/8) Epoch 34, batch 1000, train_loss[loss=3.28, NarTop10Accuracy=0.6589, over 6189.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7174, over 5893.81 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (5/8) Epoch 34, batch 1100, train_loss[loss=3.254, NarTop10Accuracy=0.6706, over 6870.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 5927.68 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (5/8) Epoch 34, batch 1200, train_loss[loss=2.772, NarTop10Accuracy=0.7662, over 7422.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5935.20 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,813 INFO [trainer.py:765] (5/8) Epoch 34, batch 1300, train_loss[loss=2.856, NarTop10Accuracy=0.7602, over 5166.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 6001.61 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,949 INFO [trainer.py:765] (5/8) Epoch 34, batch 1400, train_loss[loss=3.247, NarTop10Accuracy=0.6778, over 6009.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7161, over 6031.21 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (5/8) Epoch 34, batch 1500, train_loss[loss=3.079, NarTop10Accuracy=0.7085, over 6330.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7163, over 5967.17 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,599 INFO [trainer.py:765] (5/8) Epoch 34, batch 1600, train_loss[loss=3.04, NarTop10Accuracy=0.7177, over 7092.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7155, over 5936.69 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (5/8) Epoch 34, batch 1700, train_loss[loss=3.039, NarTop10Accuracy=0.7155, over 6771.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5919.90 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 22:05:41,720 INFO [trainer.py:765] (5/8) Epoch 34, batch 1800, train_loss[loss=3.216, NarTop10Accuracy=0.6749, over 7119.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.715, over 5981.43 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,206 INFO [trainer.py:765] (5/8) Epoch 34, batch 1900, train_loss[loss=3.057, NarTop10Accuracy=0.7114, over 5814.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7125, over 6029.19 frames. 
], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:33,769 INFO [trainer.py:765] (5/8) Epoch 34, batch 2000, train_loss[loss=3.097, NarTop10Accuracy=0.7093, over 6567.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7148, over 6023.48 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,125 INFO [trainer.py:765] (5/8) Epoch 34, batch 2100, train_loss[loss=3.111, NarTop10Accuracy=0.6811, over 4731.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7123, over 5996.48 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (5/8) Epoch 34, batch 2200, train_loss[loss=2.944, NarTop10Accuracy=0.7428, over 7095.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7123, over 6013.87 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (5/8) Epoch 34, batch 2300, train_loss[loss=2.765, NarTop10Accuracy=0.7751, over 5628.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.712, over 6023.42 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (5/8) Epoch 34, batch 2400, train_loss[loss=3.338, NarTop10Accuracy=0.6456, over 5160.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7131, over 5757.33 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (5/8) Epoch 34, batch 2500, train_loss[loss=2.926, NarTop10Accuracy=0.7436, over 5187.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7199, over 5470.01 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,621 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (5/8) Epoch 35, batch 100, train_loss[loss=2.984, NarTop10Accuracy=0.7278, over 7410.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7154, over 2366.74 frames. ], batch size: 32, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (5/8) Epoch 35, batch 200, train_loss[loss=3.155, NarTop10Accuracy=0.694, over 6837.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7139, over 3862.98 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,943 INFO [trainer.py:765] (5/8) Epoch 35, batch 300, train_loss[loss=2.805, NarTop10Accuracy=0.7667, over 7266.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7184, over 4667.70 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (5/8) Epoch 35, batch 400, train_loss[loss=2.969, NarTop10Accuracy=0.735, over 5103.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7183, over 5113.23 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,048 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (5/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (5/8) Epoch 35, batch 500, train_loss[loss=2.801, NarTop10Accuracy=0.7694, over 6096.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 5378.15 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,425 INFO [trainer.py:765] (5/8) Epoch 35, batch 600, train_loss[loss=3.299, NarTop10Accuracy=0.6653, over 5634.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5644.58 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,941 INFO [trainer.py:765] (5/8) Epoch 35, batch 700, train_loss[loss=2.53, NarTop10Accuracy=0.8106, over 5094.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7175, over 5732.12 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,384 INFO [trainer.py:765] (5/8) Epoch 35, batch 800, train_loss[loss=2.84, NarTop10Accuracy=0.7587, over 5034.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5791.32 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,373 INFO [trainer.py:765] (5/8) Epoch 35, batch 900, train_loss[loss=3.206, NarTop10Accuracy=0.6847, over 6282.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 5812.33 frames. ], batch size: 13, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (5/8) Epoch 35, batch 1000, train_loss[loss=2.854, NarTop10Accuracy=0.7452, over 6321.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7164, over 5905.95 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (5/8) Epoch 35, batch 1100, train_loss[loss=3.12, NarTop10Accuracy=0.6996, over 7104.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5940.42 frames. ], batch size: 18, lr: 2.43e-03 +2024-08-06 22:16:22,484 INFO [trainer.py:765] (5/8) Epoch 35, batch 1200, train_loss[loss=2.988, NarTop10Accuracy=0.728, over 7218.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5931.00 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (5/8) Epoch 35, batch 1300, train_loss[loss=2.753, NarTop10Accuracy=0.784, over 5043.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7206, over 6005.88 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,061 INFO [trainer.py:765] (5/8) Epoch 35, batch 1400, train_loss[loss=3.03, NarTop10Accuracy=0.7129, over 6021.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 6028.11 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (5/8) Epoch 35, batch 1500, train_loss[loss=3.014, NarTop10Accuracy=0.7209, over 6198.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7172, over 5955.68 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (5/8) Epoch 35, batch 1600, train_loss[loss=2.884, NarTop10Accuracy=0.7478, over 7170.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7159, over 5919.46 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,320 INFO [trainer.py:765] (5/8) Epoch 35, batch 1700, train_loss[loss=2.766, NarTop10Accuracy=0.7706, over 6324.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.715, over 5897.99 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,703 INFO [trainer.py:765] (5/8) Epoch 35, batch 1800, train_loss[loss=3.469, NarTop10Accuracy=0.6345, over 7254.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 5959.19 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (5/8) Epoch 35, batch 1900, train_loss[loss=3.116, NarTop10Accuracy=0.6991, over 6417.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 6003.15 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (5/8) Epoch 35, batch 2000, train_loss[loss=3.003, NarTop10Accuracy=0.7237, over 6066.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7166, over 5989.55 frames. 
], batch size: 53, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (5/8) Epoch 35, batch 2100, train_loss[loss=2.655, NarTop10Accuracy=0.791, over 4794.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7166, over 5975.15 frames. ], batch size: 5, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (5/8) Epoch 35, batch 2200, train_loss[loss=2.96, NarTop10Accuracy=0.7474, over 7317.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7151, over 6028.29 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (5/8) Epoch 35, batch 2300, train_loss[loss=2.909, NarTop10Accuracy=0.7493, over 5703.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7153, over 6030.92 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (5/8) Epoch 35, batch 2400, train_loss[loss=3.322, NarTop10Accuracy=0.6562, over 5124.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7152, over 5780.79 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,682 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (5/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,128 INFO [trainer.py:765] (5/8) Epoch 35, batch 2500, train_loss[loss=3.052, NarTop10Accuracy=0.7205, over 5253.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7183, over 5472.39 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:47,096 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 22:23:47,172 INFO [trainer.py:765] (5/8) Epoch 36, batch 100, train_loss[loss=3.259, NarTop10Accuracy=0.6764, over 7251.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7268, over 2384.29 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (5/8) Epoch 36, batch 200, train_loss[loss=2.813, NarTop10Accuracy=0.7531, over 6852.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7201, over 3868.04 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (5/8) Epoch 36, batch 300, train_loss[loss=3.307, NarTop10Accuracy=0.6625, over 6951.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7195, over 4672.30 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,276 INFO [trainer.py:765] (5/8) Epoch 36, batch 400, train_loss[loss=2.973, NarTop10Accuracy=0.7402, over 5328.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.723, over 5136.63 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,819 INFO [trainer.py:765] (5/8) Epoch 36, batch 500, train_loss[loss=3.353, NarTop10Accuracy=0.6486, over 6204.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7216, over 5415.20 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,026 INFO [trainer.py:765] (5/8) Epoch 36, batch 600, train_loss[loss=2.89, NarTop10Accuracy=0.7456, over 5685.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7219, over 5670.60 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,991 INFO [trainer.py:765] (5/8) Epoch 36, batch 700, train_loss[loss=3.16, NarTop10Accuracy=0.6805, over 5097.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7215, over 5726.96 frames. 
], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,915 INFO [trainer.py:765] (5/8) Epoch 36, batch 800, train_loss[loss=3.156, NarTop10Accuracy=0.7013, over 4362.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 5780.26 frames. ], batch size: 5, lr: 2.37e-03 +2024-08-06 22:28:17,813 INFO [trainer.py:765] (5/8) Epoch 36, batch 900, train_loss[loss=2.819, NarTop10Accuracy=0.7692, over 6648.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.722, over 5797.98 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:28:56,984 INFO [trainer.py:765] (5/8) Epoch 36, batch 1000, train_loss[loss=3.36, NarTop10Accuracy=0.6406, over 6186.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7199, over 5899.45 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:29:29,365 INFO [trainer.py:765] (5/8) Epoch 36, batch 1100, train_loss[loss=2.912, NarTop10Accuracy=0.7385, over 6759.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7188, over 5928.43 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,681 INFO [trainer.py:765] (5/8) Epoch 36, batch 1200, train_loss[loss=3.119, NarTop10Accuracy=0.7002, over 7455.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7195, over 5933.99 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,576 INFO [trainer.py:765] (5/8) Epoch 36, batch 1300, train_loss[loss=2.98, NarTop10Accuracy=0.7389, over 4281.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7188, over 5973.58 frames. ], batch size: 5, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (5/8) Epoch 36, batch 1400, train_loss[loss=3.048, NarTop10Accuracy=0.7205, over 6111.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7216, over 6004.06 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (5/8) Epoch 36, batch 1500, train_loss[loss=3.395, NarTop10Accuracy=0.6558, over 5940.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7205, over 5920.95 frames. ], batch size: 51, lr: 2.36e-03 +2024-08-06 22:32:11,460 INFO [trainer.py:765] (5/8) Epoch 36, batch 1600, train_loss[loss=3.432, NarTop10Accuracy=0.6388, over 7065.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7192, over 5920.87 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,109 INFO [trainer.py:765] (5/8) Epoch 36, batch 1700, train_loss[loss=3.42, NarTop10Accuracy=0.651, over 6240.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5907.92 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,555 INFO [trainer.py:765] (5/8) Epoch 36, batch 1800, train_loss[loss=3.182, NarTop10Accuracy=0.687, over 7092.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 5971.41 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,172 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (5/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (5/8) Epoch 36, batch 1900, train_loss[loss=2.899, NarTop10Accuracy=0.7518, over 6219.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 6008.07 frames. 
], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (5/8) Epoch 36, batch 2000, train_loss[loss=3.256, NarTop10Accuracy=0.6771, over 6156.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5970.16 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (5/8) Epoch 36, batch 2100, train_loss[loss=2.616, NarTop10Accuracy=0.7966, over 3963.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7187, over 5950.66 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (5/8) Epoch 36, batch 2200, train_loss[loss=3.443, NarTop10Accuracy=0.6389, over 7164.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5988.83 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (5/8) Epoch 36, batch 2300, train_loss[loss=3.429, NarTop10Accuracy=0.6446, over 5916.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7138, over 6001.60 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,601 INFO [trainer.py:765] (5/8) Epoch 36, batch 2400, train_loss[loss=3.201, NarTop10Accuracy=0.691, over 5274.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5770.46 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,183 INFO [trainer.py:765] (5/8) Epoch 36, batch 2500, train_loss[loss=2.866, NarTop10Accuracy=0.7485, over 5058.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7224, over 5463.42 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:28,674 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 22:37:29,726 INFO [trainer.py:765] (5/8) Epoch 37, batch 100, train_loss[loss=2.842, NarTop10Accuracy=0.7605, over 7596.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.718, over 2367.76 frames. ], batch size: 32, lr: 2.31e-03 +2024-08-06 22:38:01,273 INFO [trainer.py:765] (5/8) Epoch 37, batch 200, train_loss[loss=2.763, NarTop10Accuracy=0.7682, over 6651.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7193, over 3868.21 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (5/8) Epoch 37, batch 300, train_loss[loss=3.163, NarTop10Accuracy=0.6906, over 6939.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7211, over 4659.08 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,307 INFO [trainer.py:765] (5/8) Epoch 37, batch 400, train_loss[loss=2.617, NarTop10Accuracy=0.7959, over 5049.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7237, over 5116.72 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,862 INFO [trainer.py:765] (5/8) Epoch 37, batch 500, train_loss[loss=3.297, NarTop10Accuracy=0.6712, over 6132.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7236, over 5391.72 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,334 INFO [trainer.py:765] (5/8) Epoch 37, batch 600, train_loss[loss=2.652, NarTop10Accuracy=0.7971, over 5568.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7209, over 5646.85 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,616 INFO [trainer.py:765] (5/8) Epoch 37, batch 700, train_loss[loss=3.015, NarTop10Accuracy=0.7238, over 4989.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 5715.28 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:30,566 INFO [trainer.py:765] (5/8) Epoch 37, batch 800, train_loss[loss=2.846, NarTop10Accuracy=0.7571, over 5034.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7162, over 5781.39 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,084 INFO [trainer.py:765] (5/8) Epoch 37, batch 900, train_loss[loss=2.776, NarTop10Accuracy=0.773, over 6579.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 5789.82 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:42:38,268 INFO [trainer.py:765] (5/8) Epoch 37, batch 1000, train_loss[loss=3.16, NarTop10Accuracy=0.6833, over 6678.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7165, over 5907.23 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (5/8) Epoch 37, batch 1100, train_loss[loss=2.926, NarTop10Accuracy=0.739, over 6903.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7161, over 5926.45 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (5/8) Epoch 37, batch 1200, train_loss[loss=2.835, NarTop10Accuracy=0.7626, over 7290.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.716, over 5928.44 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,754 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (5/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,784 INFO [trainer.py:765] (5/8) Epoch 37, batch 1300, train_loss[loss=2.63, NarTop10Accuracy=0.7907, over 5058.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7189, over 6006.66 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (5/8) Epoch 37, batch 1400, train_loss[loss=2.892, NarTop10Accuracy=0.7504, over 6033.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7184, over 6020.93 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,512 INFO [trainer.py:765] (5/8) Epoch 37, batch 1500, train_loss[loss=3.031, NarTop10Accuracy=0.7204, over 6186.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7172, over 5964.17 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,437 INFO [trainer.py:765] (5/8) Epoch 37, batch 1600, train_loss[loss=3.339, NarTop10Accuracy=0.6502, over 7059.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7157, over 5922.56 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,186 INFO [trainer.py:765] (5/8) Epoch 37, batch 1700, train_loss[loss=3.379, NarTop10Accuracy=0.6446, over 6231.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7171, over 5893.41 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 22:47:01,792 INFO [trainer.py:765] (5/8) Epoch 37, batch 1800, train_loss[loss=2.748, NarTop10Accuracy=0.777, over 6930.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7182, over 5976.43 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:47:28,312 INFO [trainer.py:765] (5/8) Epoch 37, batch 1900, train_loss[loss=3.013, NarTop10Accuracy=0.7246, over 6462.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7184, over 6019.35 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (5/8) Epoch 37, batch 2000, train_loss[loss=3.256, NarTop10Accuracy=0.6718, over 5952.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7205, over 5980.49 frames. 
], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,325 INFO [trainer.py:765] (5/8) Epoch 37, batch 2100, train_loss[loss=2.729, NarTop10Accuracy=0.7721, over 4059.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7179, over 5953.95 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 22:48:44,707 INFO [trainer.py:765] (5/8) Epoch 37, batch 2200, train_loss[loss=2.89, NarTop10Accuracy=0.7522, over 7170.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7166, over 5969.42 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,912 INFO [trainer.py:765] (5/8) Epoch 37, batch 2300, train_loss[loss=2.647, NarTop10Accuracy=0.802, over 5757.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5998.41 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,318 INFO [trainer.py:765] (5/8) Epoch 37, batch 2400, train_loss[loss=3.268, NarTop10Accuracy=0.6685, over 5265.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7191, over 5764.36 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,860 INFO [trainer.py:765] (5/8) Epoch 37, batch 2500, train_loss[loss=3.226, NarTop10Accuracy=0.6784, over 5157.00 frames. ], tot_loss[loss=2.998, NarTop10Accuracy=0.7257, over 5476.92 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:17,675 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 22:51:16,152 INFO [trainer.py:765] (5/8) Epoch 38, batch 100, train_loss[loss=3.12, NarTop10Accuracy=0.7016, over 7275.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7233, over 2376.11 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,014 INFO [trainer.py:765] (5/8) Epoch 38, batch 200, train_loss[loss=3.216, NarTop10Accuracy=0.6882, over 6663.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7222, over 3852.97 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,202 INFO [trainer.py:765] (5/8) Epoch 38, batch 300, train_loss[loss=2.94, NarTop10Accuracy=0.7402, over 6924.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7181, over 4655.75 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (5/8) Epoch 38, batch 400, train_loss[loss=3.191, NarTop10Accuracy=0.695, over 5208.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7216, over 5104.10 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,228 INFO [trainer.py:765] (5/8) Epoch 38, batch 500, train_loss[loss=2.662, NarTop10Accuracy=0.7929, over 6150.00 frames. ], tot_loss[loss=2.988, NarTop10Accuracy=0.7277, over 5381.94 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,498 INFO [trainer.py:765] (5/8) Epoch 38, batch 600, train_loss[loss=3.228, NarTop10Accuracy=0.6848, over 5778.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7241, over 5654.30 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,003 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (5/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,658 INFO [trainer.py:765] (5/8) Epoch 38, batch 700, train_loss[loss=2.699, NarTop10Accuracy=0.778, over 5049.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7245, over 5725.38 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,937 INFO [trainer.py:765] (5/8) Epoch 38, batch 800, train_loss[loss=2.925, NarTop10Accuracy=0.7334, over 5172.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 5793.68 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,703 INFO [trainer.py:765] (5/8) Epoch 38, batch 900, train_loss[loss=2.831, NarTop10Accuracy=0.7541, over 6624.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7229, over 5808.01 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:56:32,090 INFO [trainer.py:765] (5/8) Epoch 38, batch 1000, train_loss[loss=3.272, NarTop10Accuracy=0.6668, over 6699.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7225, over 5919.52 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:57:08,991 INFO [trainer.py:765] (5/8) Epoch 38, batch 1100, train_loss[loss=3.153, NarTop10Accuracy=0.6945, over 6783.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7182, over 5939.12 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,661 INFO [trainer.py:765] (5/8) Epoch 38, batch 1200, train_loss[loss=2.785, NarTop10Accuracy=0.7765, over 7332.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7184, over 5920.05 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,545 INFO [trainer.py:765] (5/8) Epoch 38, batch 1300, train_loss[loss=3.146, NarTop10Accuracy=0.6953, over 4977.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 5985.32 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (5/8) Epoch 38, batch 1400, train_loss[loss=2.857, NarTop10Accuracy=0.7534, over 6105.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 6004.53 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,853 INFO [trainer.py:765] (5/8) Epoch 38, batch 1500, train_loss[loss=3.608, NarTop10Accuracy=0.6054, over 6537.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7184, over 5953.40 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,643 INFO [trainer.py:765] (5/8) Epoch 38, batch 1600, train_loss[loss=3.386, NarTop10Accuracy=0.6462, over 6891.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5928.73 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,315 INFO [trainer.py:765] (5/8) Epoch 38, batch 1700, train_loss[loss=3.143, NarTop10Accuracy=0.698, over 6495.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.714, over 5927.85 frames. ], batch size: 14, lr: 2.23e-03 +2024-08-06 23:00:43,764 INFO [trainer.py:765] (5/8) Epoch 38, batch 1800, train_loss[loss=3.364, NarTop10Accuracy=0.6532, over 7080.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7156, over 5992.02 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,191 INFO [trainer.py:765] (5/8) Epoch 38, batch 1900, train_loss[loss=3.491, NarTop10Accuracy=0.6333, over 6141.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7145, over 6029.18 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (5/8) Epoch 38, batch 2000, train_loss[loss=3.362, NarTop10Accuracy=0.6538, over 5820.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7147, over 6000.31 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (5/8) Epoch 38, batch 2100, train_loss[loss=2.931, NarTop10Accuracy=0.7454, over 4011.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7172, over 5968.18 frames. 
], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,313 INFO [trainer.py:765] (5/8) Epoch 38, batch 2200, train_loss[loss=2.87, NarTop10Accuracy=0.7575, over 7176.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7176, over 6003.32 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,419 INFO [trainer.py:765] (5/8) Epoch 38, batch 2300, train_loss[loss=2.811, NarTop10Accuracy=0.764, over 5787.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7173, over 6009.49 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,348 INFO [trainer.py:765] (5/8) Epoch 38, batch 2400, train_loss[loss=2.691, NarTop10Accuracy=0.7806, over 5067.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7196, over 5757.84 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,823 INFO [trainer.py:765] (5/8) Epoch 38, batch 2500, train_loss[loss=3.162, NarTop10Accuracy=0.6848, over 5082.00 frames. ], tot_loss[loss=3.005, NarTop10Accuracy=0.7238, over 5475.29 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,600 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 23:04:58,940 INFO [trainer.py:765] (5/8) Epoch 39, batch 100, train_loss[loss=3.294, NarTop10Accuracy=0.6674, over 7236.00 frames. ], tot_loss[loss=2.971, NarTop10Accuracy=0.7313, over 2381.89 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,468 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (5/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (5/8) Epoch 39, batch 200, train_loss[loss=2.714, NarTop10Accuracy=0.7825, over 6774.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.7271, over 3863.87 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (5/8) Epoch 39, batch 300, train_loss[loss=3.021, NarTop10Accuracy=0.7274, over 6918.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7278, over 4655.13 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,275 INFO [trainer.py:765] (5/8) Epoch 39, batch 400, train_loss[loss=3.004, NarTop10Accuracy=0.7275, over 5196.00 frames. ], tot_loss[loss=2.994, NarTop10Accuracy=0.7268, over 5117.69 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (5/8) Epoch 39, batch 500, train_loss[loss=3.337, NarTop10Accuracy=0.6653, over 6012.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7242, over 5376.17 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (5/8) Epoch 39, batch 600, train_loss[loss=2.712, NarTop10Accuracy=0.7886, over 5739.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7226, over 5654.81 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,695 INFO [trainer.py:765] (5/8) Epoch 39, batch 700, train_loss[loss=3.1, NarTop10Accuracy=0.7077, over 5007.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7196, over 5719.71 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (5/8) Epoch 39, batch 800, train_loss[loss=2.485, NarTop10Accuracy=0.8226, over 5127.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7196, over 5775.30 frames. 
], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (5/8) Epoch 39, batch 900, train_loss[loss=3.502, NarTop10Accuracy=0.621, over 6669.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7197, over 5789.62 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (5/8) Epoch 39, batch 1000, train_loss[loss=2.827, NarTop10Accuracy=0.7618, over 6231.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7219, over 5891.58 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (5/8) Epoch 39, batch 1100, train_loss[loss=2.665, NarTop10Accuracy=0.7896, over 6789.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7185, over 5934.54 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (5/8) Epoch 39, batch 1200, train_loss[loss=2.962, NarTop10Accuracy=0.7363, over 7230.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7211, over 5928.27 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,253 INFO [trainer.py:765] (5/8) Epoch 39, batch 1300, train_loss[loss=2.749, NarTop10Accuracy=0.7752, over 5157.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7226, over 5996.87 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,302 INFO [trainer.py:765] (5/8) Epoch 39, batch 1400, train_loss[loss=2.921, NarTop10Accuracy=0.7366, over 6102.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7223, over 6022.67 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (5/8) Epoch 39, batch 1500, train_loss[loss=3.615, NarTop10Accuracy=0.6041, over 6087.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7224, over 5944.15 frames. ], batch size: 51, lr: 2.18e-03 +2024-08-06 23:13:37,587 INFO [trainer.py:765] (5/8) Epoch 39, batch 1600, train_loss[loss=2.773, NarTop10Accuracy=0.7776, over 7161.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7242, over 5937.06 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (5/8) Epoch 39, batch 1700, train_loss[loss=3.275, NarTop10Accuracy=0.6658, over 6675.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7174, over 5905.63 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 23:14:30,767 INFO [trainer.py:765] (5/8) Epoch 39, batch 1800, train_loss[loss=2.833, NarTop10Accuracy=0.7636, over 7089.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.716, over 5950.59 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (5/8) Epoch 39, batch 1900, train_loss[loss=3.022, NarTop10Accuracy=0.7228, over 5736.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7146, over 6002.82 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,751 INFO [trainer.py:765] (5/8) Epoch 39, batch 2000, train_loss[loss=3.352, NarTop10Accuracy=0.6582, over 6306.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7185, over 5987.85 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (5/8) Epoch 39, batch 2100, train_loss[loss=3.223, NarTop10Accuracy=0.6883, over 4818.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7197, over 5961.94 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (5/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (5/8) Epoch 39, batch 2200, train_loss[loss=3.178, NarTop10Accuracy=0.6951, over 7359.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7195, over 6014.50 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,846 INFO [trainer.py:765] (5/8) Epoch 39, batch 2300, train_loss[loss=2.743, NarTop10Accuracy=0.7795, over 5697.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7164, over 6012.09 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,135 INFO [trainer.py:765] (5/8) Epoch 39, batch 2400, train_loss[loss=2.618, NarTop10Accuracy=0.8003, over 5055.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7221, over 5769.05 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,711 INFO [trainer.py:765] (5/8) Epoch 39, batch 2500, train_loss[loss=2.947, NarTop10Accuracy=0.7431, over 5214.00 frames. ], tot_loss[loss=2.988, NarTop10Accuracy=0.7269, over 5470.75 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,587 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (5/8) Epoch 40, batch 100, train_loss[loss=3.025, NarTop10Accuracy=0.7267, over 7053.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7271, over 2377.34 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (5/8) Epoch 40, batch 200, train_loss[loss=2.703, NarTop10Accuracy=0.784, over 6792.00 frames. ], tot_loss[loss=2.983, NarTop10Accuracy=0.7286, over 3860.38 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (5/8) Epoch 40, batch 300, train_loss[loss=2.82, NarTop10Accuracy=0.7658, over 7020.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.723, over 4658.32 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (5/8) Epoch 40, batch 400, train_loss[loss=2.909, NarTop10Accuracy=0.7409, over 5172.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.723, over 5121.62 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (5/8) Epoch 40, batch 500, train_loss[loss=2.776, NarTop10Accuracy=0.7652, over 6078.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7233, over 5382.90 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,881 INFO [trainer.py:765] (5/8) Epoch 40, batch 600, train_loss[loss=2.923, NarTop10Accuracy=0.7367, over 5703.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7237, over 5648.20 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (5/8) Epoch 40, batch 700, train_loss[loss=2.975, NarTop10Accuracy=0.7268, over 5106.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7235, over 5717.46 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,753 INFO [trainer.py:765] (5/8) Epoch 40, batch 800, train_loss[loss=2.821, NarTop10Accuracy=0.7698, over 5016.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 5766.10 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (5/8) Epoch 40, batch 900, train_loss[loss=3.437, NarTop10Accuracy=0.6342, over 6234.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7227, over 5787.24 frames. 
], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,592 INFO [trainer.py:765] (5/8) Epoch 40, batch 1000, train_loss[loss=3.412, NarTop10Accuracy=0.6443, over 6624.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7202, over 5881.95 frames. ], batch size: 14, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (5/8) Epoch 40, batch 1100, train_loss[loss=2.809, NarTop10Accuracy=0.7731, over 6894.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7201, over 5932.33 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (5/8) Epoch 40, batch 1200, train_loss[loss=3.004, NarTop10Accuracy=0.7277, over 6993.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7213, over 5928.78 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (5/8) Epoch 40, batch 1300, train_loss[loss=2.76, NarTop10Accuracy=0.7785, over 4926.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7229, over 6004.24 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,384 INFO [trainer.py:765] (5/8) Epoch 40, batch 1400, train_loss[loss=2.801, NarTop10Accuracy=0.767, over 6084.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7208, over 6035.36 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (5/8) Epoch 40, batch 1500, train_loss[loss=3.319, NarTop10Accuracy=0.6654, over 6471.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7224, over 5953.37 frames. ], batch size: 52, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (5/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30143MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (5/8) Epoch 40, batch 1600, train_loss[loss=2.799, NarTop10Accuracy=0.7604, over 7272.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7208, over 5933.07 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (5/8) Epoch 40, batch 1700, train_loss[loss=3.402, NarTop10Accuracy=0.6425, over 6246.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7206, over 5913.18 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 23:28:12,579 INFO [trainer.py:765] (5/8) Epoch 40, batch 1800, train_loss[loss=3.092, NarTop10Accuracy=0.7142, over 7143.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.7252, over 5981.03 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (5/8) Epoch 40, batch 1900, train_loss[loss=3.143, NarTop10Accuracy=0.702, over 6006.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7238, over 6010.88 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,444 INFO [trainer.py:765] (5/8) Epoch 40, batch 2000, train_loss[loss=3.534, NarTop10Accuracy=0.6224, over 6189.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7231, over 5998.89 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (5/8) Epoch 40, batch 2100, train_loss[loss=2.79, NarTop10Accuracy=0.7648, over 4875.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7223, over 5971.34 frames. 
], batch size: 5, lr: 2.11e-03
+2024-08-06 23:29:54,939 INFO [trainer.py:765] (5/8) Epoch 40, batch 2200, train_loss[loss=3.171, NarTop10Accuracy=0.6996, over 7191.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 6017.26 frames. ], batch size: 31, lr: 2.11e-03
+2024-08-06 23:30:20,013 INFO [trainer.py:765] (5/8) Epoch 40, batch 2300, train_loss[loss=2.961, NarTop10Accuracy=0.7315, over 5712.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 6032.56 frames. ], batch size: 9, lr: 2.11e-03
+2024-08-06 23:30:44,296 INFO [trainer.py:765] (5/8) Epoch 40, batch 2400, train_loss[loss=2.799, NarTop10Accuracy=0.7745, over 5202.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 5780.97 frames. ], batch size: 7, lr: 2.11e-03
+2024-08-06 23:31:07,738 INFO [trainer.py:765] (5/8) Epoch 40, batch 2500, train_loss[loss=3.181, NarTop10Accuracy=0.6891, over 5061.00 frames. ], tot_loss[loss=2.986, NarTop10Accuracy=0.7279, over 5494.44 frames. ], batch size: 7, lr: 2.11e-03
+2024-08-06 23:31:27,895 INFO [trainer.py:650] (5/8) Reaches end of dataloader.
+2024-08-06 23:31:27,898 INFO [trainer.py:1069] (5/8) Done!
diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-6 b/libritts-r/log/log-train-2024-08-06-14-23-41-6 new file mode 100644 index 0000000000000000000000000000000000000000..68c940237aca72b79efde6a8ebe644e5e557d400 --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-6 @@ -0,0 +1,1260 @@
+2024-08-06 14:23:41,730 INFO [trainer.py:870] (6/8) Training started
+2024-08-06 14:23:41,731 INFO [trainer.py:889] (6/8) Device: cuda:6
+2024-08-06 14:23:41,731 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False,
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,731 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 14:23:42,497 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,497 INFO [checkpoint.py:112] (6/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,480 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 14:23:49,642 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 14:23:49,644 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 14:23:49,646 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 14:23:49,646 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 14:23:49,646 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,252 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 14:23:50,252 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 14:23:50,581 INFO [datamodule.py:388] (6/8) About to create dev dataloader +2024-08-06 14:24:38,248 INFO [trainer.py:765] (6/8) Epoch 1, batch 100, train_loss[loss=106.7, NarTop10Accuracy=0.02046, over 7203.00 frames. ], tot_loss[loss=73.99, NarTop10Accuracy=0.0468, over 2357.27 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,518 INFO [trainer.py:765] (6/8) Epoch 1, batch 200, train_loss[loss=135.7, NarTop10Accuracy=0.01382, over 6819.00 frames. ], tot_loss[loss=97.65, NarTop10Accuracy=0.04253, over 3832.72 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,111 INFO [trainer.py:765] (6/8) Epoch 1, batch 300, train_loss[loss=108.2, NarTop10Accuracy=0.02574, over 7242.00 frames. ], tot_loss[loss=85.23, NarTop10Accuracy=0.04302, over 4651.54 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,482 INFO [trainer.py:765] (6/8) Epoch 1, batch 400, train_loss[loss=52.2, NarTop10Accuracy=0.02431, over 5031.00 frames. ], tot_loss[loss=67.99, NarTop10Accuracy=0.04725, over 5097.29 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,357 INFO [trainer.py:765] (6/8) Epoch 1, batch 500, train_loss[loss=14.76, NarTop10Accuracy=0.02818, over 6021.00 frames. ], tot_loss[loss=48.94, NarTop10Accuracy=0.04963, over 5396.75 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,000 INFO [trainer.py:765] (6/8) Epoch 1, batch 600, train_loss[loss=6.205, NarTop10Accuracy=0.1748, over 5619.00 frames. ], tot_loss[loss=33.4, NarTop10Accuracy=0.05472, over 5652.45 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,490 INFO [trainer.py:765] (6/8) Epoch 1, batch 700, train_loss[loss=6.663, NarTop10Accuracy=0.1233, over 4368.00 frames. ], tot_loss[loss=23.42, NarTop10Accuracy=0.06343, over 5712.64 frames. ], batch size: 5, lr: 2.99e-02 +2024-08-06 14:28:08,832 INFO [trainer.py:765] (6/8) Epoch 1, batch 800, train_loss[loss=6.515, NarTop10Accuracy=0.12, over 4983.00 frames. ], tot_loss[loss=17.18, NarTop10Accuracy=0.08516, over 5756.53 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,758 INFO [trainer.py:765] (6/8) Epoch 1, batch 900, train_loss[loss=5.893, NarTop10Accuracy=0.1519, over 6276.00 frames. ], tot_loss[loss=12.8, NarTop10Accuracy=0.1139, over 5775.56 frames. 
], batch size: 13, lr: 2.98e-02 +2024-08-06 14:29:12,586 INFO [trainer.py:765] (6/8) Epoch 1, batch 1000, train_loss[loss=5.766, NarTop10Accuracy=0.1795, over 6636.00 frames. ], tot_loss[loss=10.11, NarTop10Accuracy=0.1336, over 5884.14 frames. ], batch size: 14, lr: 2.97e-02 +2024-08-06 14:29:42,825 INFO [trainer.py:765] (6/8) Epoch 1, batch 1100, train_loss[loss=5.579, NarTop10Accuracy=0.2151, over 6834.00 frames. ], tot_loss[loss=8.421, NarTop10Accuracy=0.1513, over 5930.64 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,468 INFO [trainer.py:765] (6/8) Epoch 1, batch 1200, train_loss[loss=5.847, NarTop10Accuracy=0.1839, over 7206.00 frames. ], tot_loss[loss=7.348, NarTop10Accuracy=0.1705, over 5933.82 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,747 INFO [trainer.py:765] (6/8) Epoch 1, batch 1300, train_loss[loss=5.506, NarTop10Accuracy=0.232, over 4401.00 frames. ], tot_loss[loss=6.688, NarTop10Accuracy=0.185, over 5987.91 frames. ], batch size: 5, lr: 2.95e-02 +2024-08-06 14:31:18,144 INFO [trainer.py:765] (6/8) Epoch 1, batch 1400, train_loss[loss=5.61, NarTop10Accuracy=0.204, over 6015.00 frames. ], tot_loss[loss=6.252, NarTop10Accuracy=0.1967, over 6024.49 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,026 INFO [trainer.py:765] (6/8) Epoch 1, batch 1500, train_loss[loss=5.766, NarTop10Accuracy=0.1813, over 6273.00 frames. ], tot_loss[loss=5.973, NarTop10Accuracy=0.2082, over 5956.27 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 14:32:13,691 INFO [trainer.py:765] (6/8) Epoch 1, batch 1600, train_loss[loss=5.612, NarTop10Accuracy=0.2033, over 7098.00 frames. ], tot_loss[loss=5.788, NarTop10Accuracy=0.2176, over 5935.23 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,198 INFO [trainer.py:765] (6/8) Epoch 1, batch 1700, train_loss[loss=5.369, NarTop10Accuracy=0.2582, over 6783.00 frames. ], tot_loss[loss=5.661, NarTop10Accuracy=0.226, over 5908.48 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 14:33:06,499 INFO [trainer.py:765] (6/8) Epoch 1, batch 1800, train_loss[loss=5.577, NarTop10Accuracy=0.2129, over 7113.00 frames. ], tot_loss[loss=5.573, NarTop10Accuracy=0.2328, over 5966.86 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (6/8) Epoch 1, batch 1900, train_loss[loss=5.788, NarTop10Accuracy=0.1783, over 5982.00 frames. ], tot_loss[loss=5.505, NarTop10Accuracy=0.2412, over 6012.02 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 14:33:58,014 INFO [trainer.py:765] (6/8) Epoch 1, batch 2000, train_loss[loss=5.543, NarTop10Accuracy=0.2328, over 6072.00 frames. ], tot_loss[loss=5.45, NarTop10Accuracy=0.2488, over 6000.03 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (6/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26623MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,061 INFO [trainer.py:765] (6/8) Epoch 1, batch 2100, train_loss[loss=5.236, NarTop10Accuracy=0.2844, over 4905.00 frames. ], tot_loss[loss=5.385, NarTop10Accuracy=0.2595, over 5990.33 frames. 
], batch size: 5, lr: 2.88e-02 +2024-08-06 14:34:57,303 INFO [trainer.py:765] (6/8) Epoch 1, batch 2200, train_loss[loss=5.41, NarTop10Accuracy=0.2464, over 7218.00 frames. ], tot_loss[loss=5.353, NarTop10Accuracy=0.2641, over 6016.43 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 14:35:22,456 INFO [trainer.py:765] (6/8) Epoch 1, batch 2300, train_loss[loss=5.337, NarTop10Accuracy=0.2634, over 5724.00 frames. ], tot_loss[loss=5.337, NarTop10Accuracy=0.2669, over 6016.53 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,815 INFO [trainer.py:765] (6/8) Epoch 1, batch 2400, train_loss[loss=5.425, NarTop10Accuracy=0.2483, over 5055.00 frames. ], tot_loss[loss=5.283, NarTop10Accuracy=0.277, over 5771.75 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (6/8) Epoch 1, batch 2500, train_loss[loss=5.062, NarTop10Accuracy=0.3107, over 5115.00 frames. ], tot_loss[loss=5.219, NarTop10Accuracy=0.2883, over 5499.02 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,071 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 14:37:29,669 INFO [trainer.py:765] (6/8) Epoch 2, batch 100, train_loss[loss=4.977, NarTop10Accuracy=0.3376, over 7383.00 frames. ], tot_loss[loss=5.197, NarTop10Accuracy=0.2928, over 2375.73 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 14:38:10,014 INFO [trainer.py:765] (6/8) Epoch 2, batch 200, train_loss[loss=5.146, NarTop10Accuracy=0.3068, over 6804.00 frames. ], tot_loss[loss=5.166, NarTop10Accuracy=0.2986, over 3870.73 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,297 INFO [trainer.py:765] (6/8) Epoch 2, batch 300, train_loss[loss=5.17, NarTop10Accuracy=0.2958, over 7065.00 frames. ], tot_loss[loss=5.139, NarTop10Accuracy=0.3033, over 4651.71 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:06,998 INFO [trainer.py:765] (6/8) Epoch 2, batch 400, train_loss[loss=4.9, NarTop10Accuracy=0.3448, over 4986.00 frames. ], tot_loss[loss=5.113, NarTop10Accuracy=0.3078, over 5110.71 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,119 INFO [trainer.py:765] (6/8) Epoch 2, batch 500, train_loss[loss=4.888, NarTop10Accuracy=0.3477, over 6027.00 frames. ], tot_loss[loss=5.07, NarTop10Accuracy=0.3159, over 5368.09 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,083 INFO [trainer.py:765] (6/8) Epoch 2, batch 600, train_loss[loss=4.86, NarTop10Accuracy=0.3591, over 5523.00 frames. ], tot_loss[loss=5.051, NarTop10Accuracy=0.3194, over 5644.09 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,589 INFO [trainer.py:765] (6/8) Epoch 2, batch 700, train_loss[loss=5.053, NarTop10Accuracy=0.3179, over 4980.00 frames. ], tot_loss[loss=5.036, NarTop10Accuracy=0.3221, over 5721.30 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,514 INFO [trainer.py:765] (6/8) Epoch 2, batch 800, train_loss[loss=5.337, NarTop10Accuracy=0.2507, over 5019.00 frames. ], tot_loss[loss=5.018, NarTop10Accuracy=0.3252, over 5798.17 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 14:41:54,404 INFO [trainer.py:765] (6/8) Epoch 2, batch 900, train_loss[loss=4.846, NarTop10Accuracy=0.3545, over 6261.00 frames. ], tot_loss[loss=4.979, NarTop10Accuracy=0.333, over 5812.02 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 14:42:23,901 INFO [trainer.py:765] (6/8) Epoch 2, batch 1000, train_loss[loss=4.669, NarTop10Accuracy=0.4013, over 6057.00 frames. ], tot_loss[loss=4.951, NarTop10Accuracy=0.3381, over 5914.53 frames. 
], batch size: 13, lr: 2.66e-02 +2024-08-06 14:42:56,254 INFO [trainer.py:765] (6/8) Epoch 2, batch 1100, train_loss[loss=5.063, NarTop10Accuracy=0.3154, over 6690.00 frames. ], tot_loss[loss=4.929, NarTop10Accuracy=0.3422, over 5939.25 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,186 INFO [trainer.py:765] (6/8) Epoch 2, batch 1200, train_loss[loss=4.874, NarTop10Accuracy=0.3434, over 7110.00 frames. ], tot_loss[loss=4.907, NarTop10Accuracy=0.3459, over 5940.74 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,345 INFO [trainer.py:765] (6/8) Epoch 2, batch 1300, train_loss[loss=4.702, NarTop10Accuracy=0.3756, over 5205.00 frames. ], tot_loss[loss=4.868, NarTop10Accuracy=0.3535, over 5999.29 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,727 INFO [trainer.py:765] (6/8) Epoch 2, batch 1400, train_loss[loss=4.925, NarTop10Accuracy=0.3407, over 6105.00 frames. ], tot_loss[loss=4.853, NarTop10Accuracy=0.3564, over 6022.43 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,441 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (6/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26729MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (6/8) Epoch 2, batch 1500, train_loss[loss=4.775, NarTop10Accuracy=0.3759, over 6087.00 frames. ], tot_loss[loss=4.827, NarTop10Accuracy=0.3615, over 5957.52 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,660 INFO [trainer.py:765] (6/8) Epoch 2, batch 1600, train_loss[loss=4.8, NarTop10Accuracy=0.3727, over 7080.00 frames. ], tot_loss[loss=4.795, NarTop10Accuracy=0.3674, over 5945.67 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (6/8) Epoch 2, batch 1700, train_loss[loss=4.899, NarTop10Accuracy=0.3434, over 6162.00 frames. ], tot_loss[loss=4.792, NarTop10Accuracy=0.368, over 5928.32 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (6/8) Epoch 2, batch 1800, train_loss[loss=4.702, NarTop10Accuracy=0.381, over 7020.00 frames. ], tot_loss[loss=4.768, NarTop10Accuracy=0.3731, over 5982.84 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (6/8) Epoch 2, batch 1900, train_loss[loss=4.763, NarTop10Accuracy=0.3704, over 6492.00 frames. ], tot_loss[loss=4.75, NarTop10Accuracy=0.3764, over 6027.17 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 14:47:23,234 INFO [trainer.py:765] (6/8) Epoch 2, batch 2000, train_loss[loss=4.864, NarTop10Accuracy=0.3535, over 6222.00 frames. ], tot_loss[loss=4.726, NarTop10Accuracy=0.3807, over 5988.26 frames. ], batch size: 51, lr: 2.54e-02 +2024-08-06 14:47:48,589 INFO [trainer.py:765] (6/8) Epoch 2, batch 2100, train_loss[loss=4.575, NarTop10Accuracy=0.4124, over 4011.00 frames. ], tot_loss[loss=4.721, NarTop10Accuracy=0.3816, over 5972.45 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (6/8) Epoch 2, batch 2200, train_loss[loss=4.749, NarTop10Accuracy=0.3711, over 7197.00 frames. ], tot_loss[loss=4.685, NarTop10Accuracy=0.3886, over 6013.84 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,952 INFO [trainer.py:765] (6/8) Epoch 2, batch 2300, train_loss[loss=4.878, NarTop10Accuracy=0.3546, over 5667.00 frames. ], tot_loss[loss=4.688, NarTop10Accuracy=0.3879, over 6035.89 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,319 INFO [trainer.py:765] (6/8) Epoch 2, batch 2400, train_loss[loss=4.486, NarTop10Accuracy=0.4316, over 5166.00 frames. ], tot_loss[loss=4.644, NarTop10Accuracy=0.3963, over 5779.80 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,868 INFO [trainer.py:765] (6/8) Epoch 2, batch 2500, train_loss[loss=4.712, NarTop10Accuracy=0.3843, over 5142.00 frames. ], tot_loss[loss=4.62, NarTop10Accuracy=0.401, over 5499.64 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,731 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 14:50:51,116 INFO [trainer.py:765] (6/8) Epoch 3, batch 100, train_loss[loss=4.709, NarTop10Accuracy=0.3814, over 7143.00 frames. ], tot_loss[loss=4.567, NarTop10Accuracy=0.4112, over 2365.63 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,387 INFO [trainer.py:765] (6/8) Epoch 3, batch 200, train_loss[loss=4.556, NarTop10Accuracy=0.4119, over 6699.00 frames. ], tot_loss[loss=4.54, NarTop10Accuracy=0.4171, over 3840.91 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,953 INFO [trainer.py:765] (6/8) Epoch 3, batch 300, train_loss[loss=4.808, NarTop10Accuracy=0.3658, over 7284.00 frames. ], tot_loss[loss=4.511, NarTop10Accuracy=0.4227, over 4654.51 frames. ], batch size: 23, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (6/8) Epoch 3, batch 400, train_loss[loss=4.585, NarTop10Accuracy=0.3922, over 5139.00 frames. ], tot_loss[loss=4.484, NarTop10Accuracy=0.4278, over 5096.30 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,679 INFO [trainer.py:765] (6/8) Epoch 3, batch 500, train_loss[loss=4.188, NarTop10Accuracy=0.4876, over 6045.00 frames. ], tot_loss[loss=4.482, NarTop10Accuracy=0.428, over 5375.17 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 14:53:29,551 INFO [trainer.py:765] (6/8) Epoch 3, batch 600, train_loss[loss=4.419, NarTop10Accuracy=0.4393, over 5655.00 frames. ], tot_loss[loss=4.472, NarTop10Accuracy=0.4299, over 5641.04 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,465 INFO [trainer.py:765] (6/8) Epoch 3, batch 700, train_loss[loss=4.082, NarTop10Accuracy=0.4973, over 5106.00 frames. ], tot_loss[loss=4.441, NarTop10Accuracy=0.4361, over 5705.32 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 14:54:44,784 INFO [trainer.py:765] (6/8) Epoch 3, batch 800, train_loss[loss=4.206, NarTop10Accuracy=0.48, over 4977.00 frames. ], tot_loss[loss=4.418, NarTop10Accuracy=0.4409, over 5769.07 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (6/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26864MB +2024-08-06 14:55:07,182 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,051 INFO [trainer.py:765] (6/8) Epoch 3, batch 900, train_loss[loss=4.175, NarTop10Accuracy=0.4891, over 6333.00 frames. ], tot_loss[loss=4.387, NarTop10Accuracy=0.4471, over 5807.82 frames. 
], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,957 INFO [trainer.py:765] (6/8) Epoch 3, batch 1000, train_loss[loss=4.264, NarTop10Accuracy=0.4725, over 6189.00 frames. ], tot_loss[loss=4.371, NarTop10Accuracy=0.45, over 5911.64 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (6/8) Epoch 3, batch 1100, train_loss[loss=4.499, NarTop10Accuracy=0.4129, over 7131.00 frames. ], tot_loss[loss=4.348, NarTop10Accuracy=0.454, over 5940.54 frames. ], batch size: 18, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (6/8) Epoch 3, batch 1200, train_loss[loss=4.327, NarTop10Accuracy=0.4566, over 7254.00 frames. ], tot_loss[loss=4.335, NarTop10Accuracy=0.457, over 5939.96 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,630 INFO [trainer.py:765] (6/8) Epoch 3, batch 1300, train_loss[loss=4.256, NarTop10Accuracy=0.4727, over 5109.00 frames. ], tot_loss[loss=4.305, NarTop10Accuracy=0.4631, over 6001.43 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 14:58:22,899 INFO [trainer.py:765] (6/8) Epoch 3, batch 1400, train_loss[loss=4.141, NarTop10Accuracy=0.4982, over 6000.00 frames. ], tot_loss[loss=4.294, NarTop10Accuracy=0.4649, over 6020.30 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,854 INFO [trainer.py:765] (6/8) Epoch 3, batch 1500, train_loss[loss=4.311, NarTop10Accuracy=0.4604, over 5448.00 frames. ], tot_loss[loss=4.269, NarTop10Accuracy=0.4694, over 5952.85 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 14:59:18,714 INFO [trainer.py:765] (6/8) Epoch 3, batch 1600, train_loss[loss=4.024, NarTop10Accuracy=0.5187, over 7134.00 frames. ], tot_loss[loss=4.256, NarTop10Accuracy=0.4715, over 5928.05 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 14:59:45,951 INFO [trainer.py:765] (6/8) Epoch 3, batch 1700, train_loss[loss=4.103, NarTop10Accuracy=0.5031, over 6582.00 frames. ], tot_loss[loss=4.233, NarTop10Accuracy=0.4762, over 5910.14 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (6/8) Epoch 3, batch 1800, train_loss[loss=3.973, NarTop10Accuracy=0.5287, over 7011.00 frames. ], tot_loss[loss=4.208, NarTop10Accuracy=0.4816, over 5995.22 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,948 INFO [trainer.py:765] (6/8) Epoch 3, batch 1900, train_loss[loss=4.741, NarTop10Accuracy=0.3801, over 6306.00 frames. ], tot_loss[loss=4.198, NarTop10Accuracy=0.4836, over 6037.25 frames. ], batch size: 50, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (6/8) Epoch 3, batch 2000, train_loss[loss=4.354, NarTop10Accuracy=0.4459, over 6072.00 frames. ], tot_loss[loss=4.17, NarTop10Accuracy=0.4891, over 6015.47 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,898 INFO [trainer.py:765] (6/8) Epoch 3, batch 2100, train_loss[loss=3.565, NarTop10Accuracy=0.6158, over 3849.00 frames. ], tot_loss[loss=4.142, NarTop10Accuracy=0.4947, over 5987.73 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (6/8) Epoch 3, batch 2200, train_loss[loss=3.867, NarTop10Accuracy=0.5503, over 7161.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.5003, over 6014.94 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (6/8) Epoch 3, batch 2300, train_loss[loss=4.442, NarTop10Accuracy=0.4245, over 5805.00 frames. ], tot_loss[loss=4.125, NarTop10Accuracy=0.4986, over 6009.02 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,663 INFO [trainer.py:765] (6/8) Epoch 3, batch 2400, train_loss[loss=4.271, NarTop10Accuracy=0.4715, over 5100.00 frames. ], tot_loss[loss=4.096, NarTop10Accuracy=0.5043, over 5765.52 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (6/8) Epoch 3, batch 2500, train_loss[loss=3.728, NarTop10Accuracy=0.5718, over 5127.00 frames. ], tot_loss[loss=4.045, NarTop10Accuracy=0.5146, over 5463.88 frames. ], batch size: 7, lr: 2.10e-02 +2024-08-06 15:03:28,326 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:04:28,131 INFO [trainer.py:765] (6/8) Epoch 4, batch 100, train_loss[loss=3.821, NarTop10Accuracy=0.5638, over 7323.00 frames. ], tot_loss[loss=4.035, NarTop10Accuracy=0.5177, over 2389.36 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,842 INFO [trainer.py:765] (6/8) Epoch 4, batch 200, train_loss[loss=3.87, NarTop10Accuracy=0.5537, over 6822.00 frames. ], tot_loss[loss=4.011, NarTop10Accuracy=0.5224, over 3875.56 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,509 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27368MB +2024-08-06 15:05:36,237 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,888 INFO [trainer.py:765] (6/8) Epoch 4, batch 300, train_loss[loss=3.701, NarTop10Accuracy=0.5839, over 7074.00 frames. ], tot_loss[loss=3.996, NarTop10Accuracy=0.5252, over 4670.95 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,124 INFO [trainer.py:765] (6/8) Epoch 4, batch 400, train_loss[loss=3.745, NarTop10Accuracy=0.5819, over 5187.00 frames. ], tot_loss[loss=4.004, NarTop10Accuracy=0.5237, over 5112.98 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (6/8) Epoch 4, batch 500, train_loss[loss=4.044, NarTop10Accuracy=0.5068, over 6117.00 frames. ], tot_loss[loss=3.984, NarTop10Accuracy=0.5281, over 5387.52 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,818 INFO [trainer.py:765] (6/8) Epoch 4, batch 600, train_loss[loss=3.795, NarTop10Accuracy=0.5682, over 5742.00 frames. ], tot_loss[loss=3.981, NarTop10Accuracy=0.5284, over 5655.73 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,002 INFO [trainer.py:765] (6/8) Epoch 4, batch 700, train_loss[loss=4.22, NarTop10Accuracy=0.4886, over 5001.00 frames. ], tot_loss[loss=3.979, NarTop10Accuracy=0.5287, over 5720.42 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (6/8) Epoch 4, batch 800, train_loss[loss=3.677, NarTop10Accuracy=0.5928, over 4977.00 frames. ], tot_loss[loss=3.964, NarTop10Accuracy=0.5314, over 5783.39 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 15:09:10,689 INFO [trainer.py:765] (6/8) Epoch 4, batch 900, train_loss[loss=3.631, NarTop10Accuracy=0.6039, over 6231.00 frames. ], tot_loss[loss=3.925, NarTop10Accuracy=0.5397, over 5785.27 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 15:09:46,075 INFO [trainer.py:765] (6/8) Epoch 4, batch 1000, train_loss[loss=3.618, NarTop10Accuracy=0.6036, over 6159.00 frames. ], tot_loss[loss=3.92, NarTop10Accuracy=0.5408, over 5896.88 frames. 
], batch size: 13, lr: 1.89e-02 +2024-08-06 15:10:18,140 INFO [trainer.py:765] (6/8) Epoch 4, batch 1100, train_loss[loss=3.94, NarTop10Accuracy=0.548, over 6756.00 frames. ], tot_loss[loss=3.911, NarTop10Accuracy=0.5426, over 5931.15 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (6/8) Epoch 4, batch 1200, train_loss[loss=4.343, NarTop10Accuracy=0.4539, over 7155.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5434, over 5928.15 frames. ], batch size: 31, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (6/8) Epoch 4, batch 1300, train_loss[loss=3.646, NarTop10Accuracy=0.6058, over 5241.00 frames. ], tot_loss[loss=3.864, NarTop10Accuracy=0.5521, over 6003.41 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (6/8) Epoch 4, batch 1400, train_loss[loss=3.755, NarTop10Accuracy=0.5785, over 6219.00 frames. ], tot_loss[loss=3.857, NarTop10Accuracy=0.5535, over 6027.73 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (6/8) Epoch 4, batch 1500, train_loss[loss=3.913, NarTop10Accuracy=0.5492, over 6015.00 frames. ], tot_loss[loss=3.858, NarTop10Accuracy=0.5535, over 5943.66 frames. ], batch size: 52, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (6/8) Epoch 4, batch 1600, train_loss[loss=3.692, NarTop10Accuracy=0.5979, over 7236.00 frames. ], tot_loss[loss=3.851, NarTop10Accuracy=0.5548, over 5915.05 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,133 INFO [trainer.py:765] (6/8) Epoch 4, batch 1700, train_loss[loss=3.899, NarTop10Accuracy=0.5437, over 6708.00 frames. ], tot_loss[loss=3.827, NarTop10Accuracy=0.559, over 5891.09 frames. ], batch size: 14, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (6/8) Epoch 4, batch 1800, train_loss[loss=3.742, NarTop10Accuracy=0.5763, over 7113.00 frames. ], tot_loss[loss=3.832, NarTop10Accuracy=0.5584, over 5946.88 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (6/8) Epoch 4, batch 1900, train_loss[loss=3.801, NarTop10Accuracy=0.5669, over 5913.00 frames. ], tot_loss[loss=3.85, NarTop10Accuracy=0.5546, over 6003.91 frames. ], batch size: 51, lr: 1.82e-02 +2024-08-06 15:14:46,671 INFO [trainer.py:765] (6/8) Epoch 4, batch 2000, train_loss[loss=3.666, NarTop10Accuracy=0.5932, over 6324.00 frames. ], tot_loss[loss=3.82, NarTop10Accuracy=0.5608, over 5997.93 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,859 INFO [trainer.py:765] (6/8) Epoch 4, batch 2100, train_loss[loss=3.705, NarTop10Accuracy=0.5778, over 3903.00 frames. ], tot_loss[loss=3.812, NarTop10Accuracy=0.5626, over 5968.68 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (6/8) Epoch 4, batch 2200, train_loss[loss=3.626, NarTop10Accuracy=0.6057, over 7191.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5642, over 5994.73 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,089 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:16:03,242 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29575MB +2024-08-06 15:16:03,740 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (6/8) Epoch 4, batch 2300, train_loss[loss=3.596, NarTop10Accuracy=0.5997, over 5802.00 frames. ], tot_loss[loss=3.809, NarTop10Accuracy=0.5635, over 6008.90 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,840 INFO [trainer.py:765] (6/8) Epoch 4, batch 2400, train_loss[loss=3.601, NarTop10Accuracy=0.6083, over 5136.00 frames. ], tot_loss[loss=3.777, NarTop10Accuracy=0.5696, over 5759.05 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,534 INFO [trainer.py:765] (6/8) Epoch 4, batch 2500, train_loss[loss=3.595, NarTop10Accuracy=0.6189, over 5052.00 frames. ], tot_loss[loss=3.762, NarTop10Accuracy=0.5723, over 5475.95 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,827 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:18:24,100 INFO [trainer.py:765] (6/8) Epoch 5, batch 100, train_loss[loss=3.484, NarTop10Accuracy=0.6289, over 7389.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.5683, over 2360.81 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (6/8) Epoch 5, batch 200, train_loss[loss=4.093, NarTop10Accuracy=0.5035, over 6828.00 frames. ], tot_loss[loss=3.757, NarTop10Accuracy=0.5742, over 3852.68 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,887 INFO [trainer.py:765] (6/8) Epoch 5, batch 300, train_loss[loss=3.99, NarTop10Accuracy=0.5218, over 7098.00 frames. ], tot_loss[loss=3.73, NarTop10Accuracy=0.5794, over 4659.91 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,656 INFO [trainer.py:765] (6/8) Epoch 5, batch 400, train_loss[loss=3.41, NarTop10Accuracy=0.6437, over 5097.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5814, over 5099.05 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (6/8) Epoch 5, batch 500, train_loss[loss=4.013, NarTop10Accuracy=0.512, over 6171.00 frames. ], tot_loss[loss=3.738, NarTop10Accuracy=0.5773, over 5384.88 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,710 INFO [trainer.py:765] (6/8) Epoch 5, batch 600, train_loss[loss=3.923, NarTop10Accuracy=0.5268, over 5745.00 frames. ], tot_loss[loss=3.731, NarTop10Accuracy=0.5787, over 5645.61 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (6/8) Epoch 5, batch 700, train_loss[loss=3.375, NarTop10Accuracy=0.6532, over 5130.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5809, over 5718.32 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,498 INFO [trainer.py:765] (6/8) Epoch 5, batch 800, train_loss[loss=3.869, NarTop10Accuracy=0.5337, over 4974.00 frames. ], tot_loss[loss=3.705, NarTop10Accuracy=0.5837, over 5770.67 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (6/8) Epoch 5, batch 900, train_loss[loss=3.631, NarTop10Accuracy=0.5919, over 6171.00 frames. ], tot_loss[loss=3.692, NarTop10Accuracy=0.5863, over 5806.32 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (6/8) Epoch 5, batch 1000, train_loss[loss=3.577, NarTop10Accuracy=0.6192, over 6129.00 frames. ], tot_loss[loss=3.683, NarTop10Accuracy=0.5884, over 5883.92 frames. 
], batch size: 13, lr: 1.60e-02 +2024-08-06 15:24:09,572 INFO [trainer.py:765] (6/8) Epoch 5, batch 1100, train_loss[loss=3.51, NarTop10Accuracy=0.6275, over 6792.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5893, over 5923.67 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,528 INFO [trainer.py:765] (6/8) Epoch 5, batch 1200, train_loss[loss=3.529, NarTop10Accuracy=0.6197, over 7092.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.59, over 5934.67 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,379 INFO [trainer.py:765] (6/8) Epoch 5, batch 1300, train_loss[loss=3.78, NarTop10Accuracy=0.5559, over 5043.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5922, over 6008.46 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (6/8) Epoch 5, batch 1400, train_loss[loss=3.842, NarTop10Accuracy=0.5492, over 6090.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5906, over 6016.33 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (6/8) Epoch 5, batch 1500, train_loss[loss=3.704, NarTop10Accuracy=0.583, over 6051.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5916, over 5956.63 frames. ], batch size: 51, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (6/8) Epoch 5, batch 1600, train_loss[loss=3.51, NarTop10Accuracy=0.6318, over 7203.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5896, over 5944.78 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,603 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (6/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29995MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (6/8) Epoch 5, batch 1700, train_loss[loss=3.689, NarTop10Accuracy=0.5805, over 6264.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5911, over 5918.73 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 15:27:55,652 INFO [trainer.py:765] (6/8) Epoch 5, batch 1800, train_loss[loss=3.867, NarTop10Accuracy=0.5422, over 7086.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5931, over 5984.35 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (6/8) Epoch 5, batch 1900, train_loss[loss=3.678, NarTop10Accuracy=0.5972, over 6312.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.591, over 6037.42 frames. ], batch size: 51, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (6/8) Epoch 5, batch 2000, train_loss[loss=3.68, NarTop10Accuracy=0.5899, over 5841.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5902, over 6006.26 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (6/8) Epoch 5, batch 2100, train_loss[loss=3.384, NarTop10Accuracy=0.6542, over 4815.00 frames. ], tot_loss[loss=3.686, NarTop10Accuracy=0.5873, over 5970.34 frames. ], batch size: 5, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (6/8) Epoch 5, batch 2200, train_loss[loss=3.972, NarTop10Accuracy=0.5312, over 7245.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5915, over 6003.28 frames. 
], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (6/8) Epoch 5, batch 2300, train_loss[loss=3.445, NarTop10Accuracy=0.6377, over 5787.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5911, over 6012.66 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (6/8) Epoch 5, batch 2400, train_loss[loss=3.541, NarTop10Accuracy=0.6148, over 5643.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.5951, over 5769.75 frames. ], batch size: 8, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (6/8) Epoch 5, batch 2500, train_loss[loss=3.503, NarTop10Accuracy=0.6298, over 5187.00 frames. ], tot_loss[loss=3.614, NarTop10Accuracy=0.6018, over 5479.21 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,173 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:32:14,414 INFO [trainer.py:765] (6/8) Epoch 6, batch 100, train_loss[loss=3.508, NarTop10Accuracy=0.6289, over 7344.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5969, over 2366.39 frames. ], batch size: 31, lr: 1.42e-02 +2024-08-06 15:32:46,015 INFO [trainer.py:765] (6/8) Epoch 6, batch 200, train_loss[loss=4.088, NarTop10Accuracy=0.504, over 6828.00 frames. ], tot_loss[loss=3.617, NarTop10Accuracy=0.6015, over 3855.86 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,242 INFO [trainer.py:765] (6/8) Epoch 6, batch 300, train_loss[loss=3.378, NarTop10Accuracy=0.6605, over 6942.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.6034, over 4656.06 frames. ], batch size: 22, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (6/8) Epoch 6, batch 400, train_loss[loss=3.341, NarTop10Accuracy=0.6597, over 4992.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.6082, over 5101.08 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (6/8) Epoch 6, batch 500, train_loss[loss=3.298, NarTop10Accuracy=0.6725, over 6054.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6109, over 5382.42 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,457 INFO [trainer.py:765] (6/8) Epoch 6, batch 600, train_loss[loss=3.568, NarTop10Accuracy=0.6126, over 5637.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6107, over 5661.45 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,733 INFO [trainer.py:765] (6/8) Epoch 6, batch 700, train_loss[loss=3.769, NarTop10Accuracy=0.5701, over 5109.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.6078, over 5703.56 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (6/8) Epoch 6, batch 800, train_loss[loss=3.668, NarTop10Accuracy=0.5896, over 5112.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.606, over 5749.08 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,384 INFO [trainer.py:765] (6/8) Epoch 6, batch 900, train_loss[loss=4.044, NarTop10Accuracy=0.5114, over 6591.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6093, over 5776.73 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:15,271 INFO [trainer.py:765] (6/8) Epoch 6, batch 1000, train_loss[loss=3.288, NarTop10Accuracy=0.67, over 6105.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.606, over 5901.50 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (6/8) Epoch 6, batch 1100, train_loss[loss=3.419, NarTop10Accuracy=0.6403, over 6924.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6075, over 5934.86 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,826 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (6/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29995MB +2024-08-06 15:38:04,966 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (6/8) Epoch 6, batch 1200, train_loss[loss=3.468, NarTop10Accuracy=0.6332, over 7347.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6097, over 5935.59 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (6/8) Epoch 6, batch 1300, train_loss[loss=3.288, NarTop10Accuracy=0.6683, over 4962.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6106, over 5998.94 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 15:39:44,070 INFO [trainer.py:765] (6/8) Epoch 6, batch 1400, train_loss[loss=3.332, NarTop10Accuracy=0.6575, over 6063.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6115, over 6025.86 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (6/8) Epoch 6, batch 1500, train_loss[loss=3.915, NarTop10Accuracy=0.5409, over 6366.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6115, over 5956.81 frames. ], batch size: 51, lr: 1.36e-02 +2024-08-06 15:40:43,106 INFO [trainer.py:765] (6/8) Epoch 6, batch 1600, train_loss[loss=3.361, NarTop10Accuracy=0.6477, over 7125.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6129, over 5939.07 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,789 INFO [trainer.py:765] (6/8) Epoch 6, batch 1700, train_loss[loss=3.665, NarTop10Accuracy=0.5935, over 6300.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.614, over 5911.71 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (6/8) Epoch 6, batch 1800, train_loss[loss=3.326, NarTop10Accuracy=0.6718, over 7083.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6127, over 5974.72 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (6/8) Epoch 6, batch 1900, train_loss[loss=3.825, NarTop10Accuracy=0.5578, over 5892.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6094, over 6019.47 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (6/8) Epoch 6, batch 2000, train_loss[loss=3.438, NarTop10Accuracy=0.6349, over 5970.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6103, over 5978.01 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:53,668 INFO [trainer.py:765] (6/8) Epoch 6, batch 2100, train_loss[loss=3.439, NarTop10Accuracy=0.6464, over 3807.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6118, over 5972.15 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (6/8) Epoch 6, batch 2200, train_loss[loss=3.796, NarTop10Accuracy=0.566, over 7212.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6113, over 6012.27 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,106 INFO [trainer.py:765] (6/8) Epoch 6, batch 2300, train_loss[loss=3.306, NarTop10Accuracy=0.6715, over 5631.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6111, over 6037.84 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (6/8) Epoch 6, batch 2400, train_loss[loss=3.274, NarTop10Accuracy=0.6765, over 5124.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6183, over 5788.26 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (6/8) Epoch 6, batch 2500, train_loss[loss=3.619, NarTop10Accuracy=0.6062, over 5196.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.621, over 5507.89 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,698 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:45:58,042 INFO [trainer.py:765] (6/8) Epoch 7, batch 100, train_loss[loss=3.22, NarTop10Accuracy=0.6802, over 7380.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.617, over 2378.78 frames. ], batch size: 31, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (6/8) Epoch 7, batch 200, train_loss[loss=3.549, NarTop10Accuracy=0.6138, over 6852.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.619, over 3868.43 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (6/8) Epoch 7, batch 300, train_loss[loss=3.774, NarTop10Accuracy=0.5694, over 7095.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6181, over 4651.48 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,494 INFO [trainer.py:765] (6/8) Epoch 7, batch 400, train_loss[loss=3.632, NarTop10Accuracy=0.598, over 5130.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6197, over 5096.58 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,729 INFO [trainer.py:765] (6/8) Epoch 7, batch 500, train_loss[loss=3.516, NarTop10Accuracy=0.6178, over 6057.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6217, over 5394.53 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,368 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29995MB +2024-08-06 15:48:35,078 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,720 INFO [trainer.py:765] (6/8) Epoch 7, batch 600, train_loss[loss=3.233, NarTop10Accuracy=0.6834, over 5844.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6212, over 5653.21 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,912 INFO [trainer.py:765] (6/8) Epoch 7, batch 700, train_loss[loss=3.741, NarTop10Accuracy=0.5764, over 5160.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6228, over 5713.85 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,380 INFO [trainer.py:765] (6/8) Epoch 7, batch 800, train_loss[loss=3.332, NarTop10Accuracy=0.6612, over 4983.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6262, over 5765.29 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,548 INFO [trainer.py:765] (6/8) Epoch 7, batch 900, train_loss[loss=3.327, NarTop10Accuracy=0.6662, over 6249.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6276, over 5803.56 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 15:51:07,155 INFO [trainer.py:765] (6/8) Epoch 7, batch 1000, train_loss[loss=3.357, NarTop10Accuracy=0.6553, over 6177.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6268, over 5906.08 frames. 
], batch size: 13, lr: 1.20e-02 +2024-08-06 15:51:51,758 INFO [trainer.py:765] (6/8) Epoch 7, batch 1100, train_loss[loss=3.27, NarTop10Accuracy=0.6688, over 6975.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6273, over 5949.69 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,699 INFO [trainer.py:765] (6/8) Epoch 7, batch 1200, train_loss[loss=3.392, NarTop10Accuracy=0.6501, over 6990.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.629, over 5926.93 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,007 INFO [trainer.py:765] (6/8) Epoch 7, batch 1300, train_loss[loss=3.412, NarTop10Accuracy=0.6413, over 5220.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6286, over 6001.27 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,842 INFO [trainer.py:765] (6/8) Epoch 7, batch 1400, train_loss[loss=3.453, NarTop10Accuracy=0.6496, over 6108.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6254, over 6025.11 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (6/8) Epoch 7, batch 1500, train_loss[loss=3.807, NarTop10Accuracy=0.5656, over 6414.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6296, over 5951.15 frames. ], batch size: 53, lr: 1.19e-02 +2024-08-06 15:54:32,385 INFO [trainer.py:765] (6/8) Epoch 7, batch 1600, train_loss[loss=3.587, NarTop10Accuracy=0.6109, over 7044.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6296, over 5945.11 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,054 INFO [trainer.py:765] (6/8) Epoch 7, batch 1700, train_loss[loss=3.437, NarTop10Accuracy=0.64, over 6372.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6248, over 5918.10 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 15:55:25,512 INFO [trainer.py:765] (6/8) Epoch 7, batch 1800, train_loss[loss=4.009, NarTop10Accuracy=0.5118, over 6894.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6265, over 5995.56 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,083 INFO [trainer.py:765] (6/8) Epoch 7, batch 1900, train_loss[loss=3.434, NarTop10Accuracy=0.6445, over 5634.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6232, over 6034.12 frames. ], batch size: 50, lr: 1.18e-02 +2024-08-06 15:56:17,591 INFO [trainer.py:765] (6/8) Epoch 7, batch 2000, train_loss[loss=3.722, NarTop10Accuracy=0.5765, over 6315.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6245, over 5998.06 frames. ], batch size: 51, lr: 1.17e-02 +2024-08-06 15:56:42,856 INFO [trainer.py:765] (6/8) Epoch 7, batch 2100, train_loss[loss=3.688, NarTop10Accuracy=0.5822, over 3906.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6274, over 5967.36 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 15:57:08,079 INFO [trainer.py:765] (6/8) Epoch 7, batch 2200, train_loss[loss=3.454, NarTop10Accuracy=0.6356, over 7239.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6233, over 6006.23 frames. ], batch size: 32, lr: 1.17e-02 +2024-08-06 15:57:33,178 INFO [trainer.py:765] (6/8) Epoch 7, batch 2300, train_loss[loss=3.192, NarTop10Accuracy=0.6845, over 5790.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6243, over 6008.84 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,619 INFO [trainer.py:765] (6/8) Epoch 7, batch 2400, train_loss[loss=3.196, NarTop10Accuracy=0.6875, over 5028.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6264, over 5768.19 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,088 INFO [trainer.py:765] (6/8) Epoch 7, batch 2500, train_loss[loss=3.664, NarTop10Accuracy=0.5851, over 5001.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6321, over 5472.02 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,565 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29995MB +2024-08-06 15:58:40,220 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:48,956 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:59:52,876 INFO [trainer.py:765] (6/8) Epoch 8, batch 100, train_loss[loss=3.669, NarTop10Accuracy=0.5975, over 7062.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6363, over 2359.67 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,880 INFO [trainer.py:765] (6/8) Epoch 8, batch 200, train_loss[loss=3.299, NarTop10Accuracy=0.6786, over 6654.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6306, over 3846.00 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,562 INFO [trainer.py:765] (6/8) Epoch 8, batch 300, train_loss[loss=3.283, NarTop10Accuracy=0.6667, over 7029.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6324, over 4639.78 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,759 INFO [trainer.py:765] (6/8) Epoch 8, batch 400, train_loss[loss=3.621, NarTop10Accuracy=0.5924, over 5118.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6325, over 5105.54 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,065 INFO [trainer.py:765] (6/8) Epoch 8, batch 500, train_loss[loss=3.781, NarTop10Accuracy=0.5666, over 6009.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.636, over 5393.70 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,835 INFO [trainer.py:765] (6/8) Epoch 8, batch 600, train_loss[loss=3.057, NarTop10Accuracy=0.7193, over 5772.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6328, over 5658.64 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,499 INFO [trainer.py:765] (6/8) Epoch 8, batch 700, train_loss[loss=3.725, NarTop10Accuracy=0.5731, over 5091.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.631, over 5723.45 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 16:03:50,083 INFO [trainer.py:765] (6/8) Epoch 8, batch 800, train_loss[loss=3.49, NarTop10Accuracy=0.6136, over 4293.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6331, over 5764.55 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 16:04:27,587 INFO [trainer.py:765] (6/8) Epoch 8, batch 900, train_loss[loss=3.26, NarTop10Accuracy=0.6892, over 6597.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6357, over 5794.49 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:04:57,465 INFO [trainer.py:765] (6/8) Epoch 8, batch 1000, train_loss[loss=3.65, NarTop10Accuracy=0.5983, over 6585.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6374, over 5892.09 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:05:37,293 INFO [trainer.py:765] (6/8) Epoch 8, batch 1100, train_loss[loss=3.617, NarTop10Accuracy=0.5908, over 6819.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6381, over 5949.98 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,858 INFO [trainer.py:765] (6/8) Epoch 8, batch 1200, train_loss[loss=3.418, NarTop10Accuracy=0.6419, over 7158.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6367, over 5939.24 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 16:06:45,186 INFO [trainer.py:765] (6/8) Epoch 8, batch 1300, train_loss[loss=3.263, NarTop10Accuracy=0.6857, over 5130.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6388, over 6001.24 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 16:07:24,234 INFO [trainer.py:765] (6/8) Epoch 8, batch 1400, train_loss[loss=3.454, NarTop10Accuracy=0.6334, over 6030.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6385, over 6020.77 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,168 INFO [trainer.py:765] (6/8) Epoch 8, batch 1500, train_loss[loss=3.445, NarTop10Accuracy=0.6382, over 6123.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.642, over 5979.31 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (6/8) Epoch 8, batch 1600, train_loss[loss=3.24, NarTop10Accuracy=0.6855, over 6930.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6422, over 5960.21 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,617 INFO [trainer.py:765] (6/8) Epoch 8, batch 1700, train_loss[loss=3.37, NarTop10Accuracy=0.6522, over 6267.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6424, over 5926.58 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 16:09:13,105 INFO [trainer.py:765] (6/8) Epoch 8, batch 1800, train_loss[loss=3.204, NarTop10Accuracy=0.6895, over 7122.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6421, over 5986.66 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,635 INFO [trainer.py:765] (6/8) Epoch 8, batch 1900, train_loss[loss=3.649, NarTop10Accuracy=0.5883, over 6129.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6441, over 6014.79 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:09:56,939 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (6/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29995MB +2024-08-06 16:10:05,470 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,204 INFO [trainer.py:765] (6/8) Epoch 8, batch 2000, train_loss[loss=4.011, NarTop10Accuracy=0.5161, over 6453.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6421, over 5986.85 frames. ], batch size: 51, lr: 1.04e-02 +2024-08-06 16:10:38,514 INFO [trainer.py:765] (6/8) Epoch 8, batch 2100, train_loss[loss=3.183, NarTop10Accuracy=0.685, over 4008.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6433, over 5975.30 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,747 INFO [trainer.py:765] (6/8) Epoch 8, batch 2200, train_loss[loss=3.566, NarTop10Accuracy=0.6099, over 7428.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6412, over 6013.13 frames. ], batch size: 32, lr: 1.04e-02 +2024-08-06 16:11:28,905 INFO [trainer.py:765] (6/8) Epoch 8, batch 2300, train_loss[loss=3.698, NarTop10Accuracy=0.58, over 5676.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6372, over 6025.32 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,093 INFO [trainer.py:765] (6/8) Epoch 8, batch 2400, train_loss[loss=3.538, NarTop10Accuracy=0.6175, over 5112.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6398, over 5787.41 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:16,444 INFO [trainer.py:765] (6/8) Epoch 8, batch 2500, train_loss[loss=3.14, NarTop10Accuracy=0.6962, over 5205.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6412, over 5494.53 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 16:12:36,624 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 16:13:37,514 INFO [trainer.py:765] (6/8) Epoch 9, batch 100, train_loss[loss=3.109, NarTop10Accuracy=0.7031, over 7101.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6512, over 2370.26 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,440 INFO [trainer.py:765] (6/8) Epoch 9, batch 200, train_loss[loss=3.65, NarTop10Accuracy=0.5923, over 6750.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6503, over 3850.61 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,507 INFO [trainer.py:765] (6/8) Epoch 9, batch 300, train_loss[loss=3.36, NarTop10Accuracy=0.6537, over 6996.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6489, over 4656.05 frames. ], batch size: 22, lr: 9.68e-03 +2024-08-06 16:15:14,914 INFO [trainer.py:765] (6/8) Epoch 9, batch 400, train_loss[loss=3.241, NarTop10Accuracy=0.6678, over 5151.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6544, over 5115.89 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,336 INFO [trainer.py:765] (6/8) Epoch 9, batch 500, train_loss[loss=3.169, NarTop10Accuracy=0.6897, over 6303.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6565, over 5403.32 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,972 INFO [trainer.py:765] (6/8) Epoch 9, batch 600, train_loss[loss=3.765, NarTop10Accuracy=0.5658, over 5823.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6581, over 5660.29 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,145 INFO [trainer.py:765] (6/8) Epoch 9, batch 700, train_loss[loss=3.017, NarTop10Accuracy=0.7147, over 5115.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6553, over 5730.13 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,052 INFO [trainer.py:765] (6/8) Epoch 9, batch 800, train_loss[loss=3.268, NarTop10Accuracy=0.6755, over 4989.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.648, over 5784.60 frames. ], batch size: 6, lr: 9.57e-03 +2024-08-06 16:18:07,815 INFO [trainer.py:765] (6/8) Epoch 9, batch 900, train_loss[loss=3.055, NarTop10Accuracy=0.7248, over 6024.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6487, over 5800.63 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,345 INFO [trainer.py:765] (6/8) Epoch 9, batch 1000, train_loss[loss=3.157, NarTop10Accuracy=0.6899, over 6093.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6462, over 5891.74 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,382 INFO [trainer.py:765] (6/8) Epoch 9, batch 1100, train_loss[loss=3.448, NarTop10Accuracy=0.6349, over 6684.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6461, over 5948.00 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,877 INFO [trainer.py:765] (6/8) Epoch 9, batch 1200, train_loss[loss=3.811, NarTop10Accuracy=0.5587, over 7233.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6444, over 5928.62 frames. 
], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,906 INFO [trainer.py:765] (6/8) Epoch 9, batch 1300, train_loss[loss=3.165, NarTop10Accuracy=0.6975, over 5175.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6462, over 6007.27 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,579 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (6/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,690 INFO [trainer.py:765] (6/8) Epoch 9, batch 1400, train_loss[loss=3.633, NarTop10Accuracy=0.5891, over 6099.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6448, over 6026.01 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,895 INFO [trainer.py:765] (6/8) Epoch 9, batch 1500, train_loss[loss=3.392, NarTop10Accuracy=0.6512, over 5847.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6499, over 5958.36 frames. ], batch size: 52, lr: 9.42e-03 +2024-08-06 16:22:06,720 INFO [trainer.py:765] (6/8) Epoch 9, batch 1600, train_loss[loss=3.272, NarTop10Accuracy=0.6738, over 7011.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6514, over 5939.29 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,469 INFO [trainer.py:765] (6/8) Epoch 9, batch 1700, train_loss[loss=3.427, NarTop10Accuracy=0.6467, over 6405.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6479, over 5903.65 frames. ], batch size: 13, lr: 9.38e-03 +2024-08-06 16:23:00,063 INFO [trainer.py:765] (6/8) Epoch 9, batch 1800, train_loss[loss=3.353, NarTop10Accuracy=0.6681, over 7257.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6493, over 5969.46 frames. ], batch size: 23, lr: 9.36e-03 +2024-08-06 16:23:26,782 INFO [trainer.py:765] (6/8) Epoch 9, batch 1900, train_loss[loss=3.379, NarTop10Accuracy=0.6517, over 6471.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6464, over 6012.07 frames. ], batch size: 50, lr: 9.34e-03 +2024-08-06 16:23:52,484 INFO [trainer.py:765] (6/8) Epoch 9, batch 2000, train_loss[loss=3.891, NarTop10Accuracy=0.5463, over 6123.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.648, over 5981.11 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,962 INFO [trainer.py:765] (6/8) Epoch 9, batch 2100, train_loss[loss=3.116, NarTop10Accuracy=0.701, over 4680.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6483, over 5950.15 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,420 INFO [trainer.py:765] (6/8) Epoch 9, batch 2200, train_loss[loss=3.788, NarTop10Accuracy=0.5681, over 7236.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6477, over 5994.91 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,720 INFO [trainer.py:765] (6/8) Epoch 9, batch 2300, train_loss[loss=3.243, NarTop10Accuracy=0.6838, over 5685.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6445, over 6013.52 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,162 INFO [trainer.py:765] (6/8) Epoch 9, batch 2400, train_loss[loss=3.247, NarTop10Accuracy=0.6734, over 5124.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.646, over 5759.71 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,767 INFO [trainer.py:765] (6/8) Epoch 9, batch 2500, train_loss[loss=3.122, NarTop10Accuracy=0.7047, over 5097.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.653, over 5487.94 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,340 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 16:27:19,583 INFO [trainer.py:765] (6/8) Epoch 10, batch 100, train_loss[loss=3.21, NarTop10Accuracy=0.6841, over 7062.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6519, over 2368.65 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,627 INFO [trainer.py:765] (6/8) Epoch 10, batch 200, train_loss[loss=3.096, NarTop10Accuracy=0.7172, over 6762.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6569, over 3854.54 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (6/8) Epoch 10, batch 300, train_loss[loss=3.062, NarTop10Accuracy=0.7228, over 7023.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6566, over 4652.12 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,199 INFO [trainer.py:765] (6/8) Epoch 10, batch 400, train_loss[loss=3.161, NarTop10Accuracy=0.6992, over 5166.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6583, over 5109.77 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,218 INFO [trainer.py:765] (6/8) Epoch 10, batch 500, train_loss[loss=3.125, NarTop10Accuracy=0.7083, over 6042.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6591, over 5400.06 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (6/8) Epoch 10, batch 600, train_loss[loss=3.608, NarTop10Accuracy=0.5927, over 5727.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6565, over 5660.21 frames. ], batch size: 9, lr: 8.67e-03 +2024-08-06 16:30:34,264 INFO [trainer.py:765] (6/8) Epoch 10, batch 700, train_loss[loss=3.625, NarTop10Accuracy=0.5923, over 5022.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6548, over 5723.22 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,842 INFO [trainer.py:765] (6/8) Epoch 10, batch 800, train_loss[loss=3.608, NarTop10Accuracy=0.608, over 4320.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6534, over 5763.93 frames. ], batch size: 5, lr: 8.64e-03 +2024-08-06 16:31:16,257 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (6/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (6/8) Epoch 10, batch 900, train_loss[loss=3.048, NarTop10Accuracy=0.7275, over 6483.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6588, over 5794.28 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (6/8) Epoch 10, batch 1000, train_loss[loss=3.209, NarTop10Accuracy=0.693, over 6243.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6573, over 5902.04 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (6/8) Epoch 10, batch 1100, train_loss[loss=3.146, NarTop10Accuracy=0.6976, over 6750.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6558, over 5944.96 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,961 INFO [trainer.py:765] (6/8) Epoch 10, batch 1200, train_loss[loss=3.221, NarTop10Accuracy=0.6893, over 7533.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6564, over 5959.83 frames. ], batch size: 33, lr: 8.57e-03 +2024-08-06 16:34:16,169 INFO [trainer.py:765] (6/8) Epoch 10, batch 1300, train_loss[loss=3.541, NarTop10Accuracy=0.6172, over 5028.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6574, over 6024.09 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,201 INFO [trainer.py:765] (6/8) Epoch 10, batch 1400, train_loss[loss=3.384, NarTop10Accuracy=0.655, over 6036.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6511, over 6018.48 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (6/8) Epoch 10, batch 1500, train_loss[loss=3.726, NarTop10Accuracy=0.5835, over 6111.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6563, over 5937.67 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (6/8) Epoch 10, batch 1600, train_loss[loss=3.614, NarTop10Accuracy=0.6021, over 7128.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6593, over 5931.18 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (6/8) Epoch 10, batch 1700, train_loss[loss=3.251, NarTop10Accuracy=0.6745, over 6228.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6571, over 5909.89 frames. ], batch size: 13, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (6/8) Epoch 10, batch 1800, train_loss[loss=3.225, NarTop10Accuracy=0.6834, over 7050.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6594, over 5995.03 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (6/8) Epoch 10, batch 1900, train_loss[loss=3.37, NarTop10Accuracy=0.6569, over 6582.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6604, over 6033.26 frames. ], batch size: 50, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (6/8) Epoch 10, batch 2000, train_loss[loss=3.3, NarTop10Accuracy=0.6672, over 5739.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6624, over 6004.10 frames. ], batch size: 50, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (6/8) Epoch 10, batch 2100, train_loss[loss=3.485, NarTop10Accuracy=0.6222, over 4005.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6597, over 5983.97 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (6/8) Epoch 10, batch 2200, train_loss[loss=3.783, NarTop10Accuracy=0.5618, over 7380.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6577, over 6015.73 frames. ], batch size: 31, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (6/8) Epoch 10, batch 2300, train_loss[loss=3.14, NarTop10Accuracy=0.6975, over 5685.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6564, over 6030.06 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,006 INFO [trainer.py:765] (6/8) Epoch 10, batch 2400, train_loss[loss=3.292, NarTop10Accuracy=0.6733, over 5193.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6625, over 5796.75 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (6/8) Epoch 10, batch 2500, train_loss[loss=3.676, NarTop10Accuracy=0.5933, over 5172.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6671, over 5489.69 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,617 INFO [trainer.py:650] (6/8) Reaches end of dataloader. 
+2024-08-06 16:41:06,233 INFO [trainer.py:765] (6/8) Epoch 11, batch 100, train_loss[loss=3.615, NarTop10Accuracy=0.5969, over 7485.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.653, over 2370.60 frames. ], batch size: 31, lr: 7.97e-03 +2024-08-06 16:41:39,020 INFO [trainer.py:765] (6/8) Epoch 11, batch 200, train_loss[loss=3.584, NarTop10Accuracy=0.6045, over 6813.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6589, over 3859.08 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,188 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,975 INFO [trainer.py:765] (6/8) Epoch 11, batch 300, train_loss[loss=3.132, NarTop10Accuracy=0.7039, over 7119.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6645, over 4653.90 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,154 INFO [trainer.py:765] (6/8) Epoch 11, batch 400, train_loss[loss=3.292, NarTop10Accuracy=0.6661, over 4950.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6663, over 5087.13 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,719 INFO [trainer.py:765] (6/8) Epoch 11, batch 500, train_loss[loss=3.055, NarTop10Accuracy=0.7175, over 6018.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6686, over 5376.11 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,241 INFO [trainer.py:765] (6/8) Epoch 11, batch 600, train_loss[loss=3.542, NarTop10Accuracy=0.6129, over 5589.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6658, over 5644.84 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (6/8) Epoch 11, batch 700, train_loss[loss=3.872, NarTop10Accuracy=0.5477, over 5016.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6669, over 5738.86 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,467 INFO [trainer.py:765] (6/8) Epoch 11, batch 800, train_loss[loss=3, NarTop10Accuracy=0.7199, over 5052.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6638, over 5785.08 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,458 INFO [trainer.py:765] (6/8) Epoch 11, batch 900, train_loss[loss=3.548, NarTop10Accuracy=0.6112, over 6303.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6651, over 5804.58 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,310 INFO [trainer.py:765] (6/8) Epoch 11, batch 1000, train_loss[loss=3.424, NarTop10Accuracy=0.64, over 6216.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6648, over 5916.99 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 16:46:53,456 INFO [trainer.py:765] (6/8) Epoch 11, batch 1100, train_loss[loss=3.102, NarTop10Accuracy=0.706, over 6915.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6665, over 5949.54 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (6/8) Epoch 11, batch 1200, train_loss[loss=3.413, NarTop10Accuracy=0.6409, over 7509.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6643, over 5954.97 frames. 
], batch size: 31, lr: 7.81e-03 +2024-08-06 16:48:06,482 INFO [trainer.py:765] (6/8) Epoch 11, batch 1300, train_loss[loss=3.004, NarTop10Accuracy=0.732, over 5070.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6638, over 6008.88 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (6/8) Epoch 11, batch 1400, train_loss[loss=3.394, NarTop10Accuracy=0.6434, over 6129.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6612, over 6032.76 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,345 INFO [trainer.py:765] (6/8) Epoch 11, batch 1500, train_loss[loss=3.274, NarTop10Accuracy=0.6753, over 6228.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6608, over 5972.48 frames. ], batch size: 52, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (6/8) Epoch 11, batch 1600, train_loss[loss=3.3, NarTop10Accuracy=0.6641, over 7116.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6621, over 5964.10 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,792 INFO [trainer.py:765] (6/8) Epoch 11, batch 1700, train_loss[loss=3.415, NarTop10Accuracy=0.6458, over 6330.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6639, over 5946.74 frames. ], batch size: 13, lr: 7.74e-03 +2024-08-06 16:50:30,353 INFO [trainer.py:765] (6/8) Epoch 11, batch 1800, train_loss[loss=3.367, NarTop10Accuracy=0.6511, over 6981.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6608, over 5980.23 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (6/8) Epoch 11, batch 1900, train_loss[loss=3.889, NarTop10Accuracy=0.5472, over 5646.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6593, over 6038.83 frames. ], batch size: 50, lr: 7.71e-03 +2024-08-06 16:51:22,405 INFO [trainer.py:765] (6/8) Epoch 11, batch 2000, train_loss[loss=3.834, NarTop10Accuracy=0.5612, over 6399.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6608, over 5994.97 frames. ], batch size: 53, lr: 7.70e-03 +2024-08-06 16:51:47,793 INFO [trainer.py:765] (6/8) Epoch 11, batch 2100, train_loss[loss=3.144, NarTop10Accuracy=0.704, over 4011.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6631, over 5972.77 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (6/8) Epoch 11, batch 2200, train_loss[loss=3.299, NarTop10Accuracy=0.6689, over 7356.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6636, over 6004.66 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,900 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,079 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (6/8) Epoch 11, batch 2300, train_loss[loss=3.313, NarTop10Accuracy=0.6694, over 5682.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.662, over 6024.01 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (6/8) Epoch 11, batch 2400, train_loss[loss=3.496, NarTop10Accuracy=0.6255, over 5196.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6652, over 5779.45 frames. 
], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,372 INFO [trainer.py:765] (6/8) Epoch 11, batch 2500, train_loss[loss=3.335, NarTop10Accuracy=0.645, over 5043.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6657, over 5473.42 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,172 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 16:54:58,524 INFO [trainer.py:765] (6/8) Epoch 12, batch 100, train_loss[loss=3.68, NarTop10Accuracy=0.5774, over 7131.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6623, over 2354.05 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,431 INFO [trainer.py:765] (6/8) Epoch 12, batch 200, train_loss[loss=3.105, NarTop10Accuracy=0.7055, over 6588.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6712, over 3843.29 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,095 INFO [trainer.py:765] (6/8) Epoch 12, batch 300, train_loss[loss=3.044, NarTop10Accuracy=0.7221, over 7008.00 frames. ], tot_loss[loss=3.248, NarTop10Accuracy=0.6767, over 4649.24 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,425 INFO [trainer.py:765] (6/8) Epoch 12, batch 400, train_loss[loss=3.083, NarTop10Accuracy=0.7097, over 5022.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6746, over 5132.28 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,502 INFO [trainer.py:765] (6/8) Epoch 12, batch 500, train_loss[loss=3.584, NarTop10Accuracy=0.605, over 5970.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6721, over 5400.91 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,482 INFO [trainer.py:765] (6/8) Epoch 12, batch 600, train_loss[loss=3.168, NarTop10Accuracy=0.6929, over 5682.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.672, over 5642.85 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,003 INFO [trainer.py:765] (6/8) Epoch 12, batch 700, train_loss[loss=3.429, NarTop10Accuracy=0.6412, over 4269.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6682, over 5709.10 frames. ], batch size: 5, lr: 7.22e-03 +2024-08-06 16:58:53,468 INFO [trainer.py:765] (6/8) Epoch 12, batch 800, train_loss[loss=3.37, NarTop10Accuracy=0.6557, over 4416.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6678, over 5764.96 frames. ], batch size: 5, lr: 7.21e-03 +2024-08-06 16:59:27,205 INFO [trainer.py:765] (6/8) Epoch 12, batch 900, train_loss[loss=3.083, NarTop10Accuracy=0.7048, over 6213.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6718, over 5792.79 frames. ], batch size: 13, lr: 7.20e-03 +2024-08-06 17:00:01,573 INFO [trainer.py:765] (6/8) Epoch 12, batch 1000, train_loss[loss=2.915, NarTop10Accuracy=0.7437, over 6216.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6696, over 5901.59 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 17:00:39,188 INFO [trainer.py:765] (6/8) Epoch 12, batch 1100, train_loss[loss=3.651, NarTop10Accuracy=0.5847, over 6888.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6657, over 5939.44 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,963 INFO [trainer.py:765] (6/8) Epoch 12, batch 1200, train_loss[loss=3.064, NarTop10Accuracy=0.7247, over 7014.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6718, over 5917.71 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,107 INFO [trainer.py:765] (6/8) Epoch 12, batch 1300, train_loss[loss=3.263, NarTop10Accuracy=0.6715, over 5061.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6685, over 5981.88 frames. 
], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,322 INFO [trainer.py:765] (6/8) Epoch 12, batch 1400, train_loss[loss=3.62, NarTop10Accuracy=0.6027, over 6102.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.668, over 6019.44 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,876 INFO [trainer.py:765] (6/8) Epoch 12, batch 1500, train_loss[loss=3.337, NarTop10Accuracy=0.6641, over 6201.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6722, over 5968.77 frames. ], batch size: 50, lr: 7.13e-03 +2024-08-06 17:03:20,690 INFO [trainer.py:765] (6/8) Epoch 12, batch 1600, train_loss[loss=3.236, NarTop10Accuracy=0.6725, over 7341.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6708, over 5946.63 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,296 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (6/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,603 INFO [trainer.py:765] (6/8) Epoch 12, batch 1700, train_loss[loss=3.303, NarTop10Accuracy=0.6575, over 6207.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6699, over 5927.63 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,121 INFO [trainer.py:765] (6/8) Epoch 12, batch 1800, train_loss[loss=3.704, NarTop10Accuracy=0.5796, over 7230.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6698, over 5992.98 frames. ], batch size: 23, lr: 7.10e-03 +2024-08-06 17:04:48,591 INFO [trainer.py:765] (6/8) Epoch 12, batch 1900, train_loss[loss=3.23, NarTop10Accuracy=0.6844, over 6261.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6708, over 6031.90 frames. ], batch size: 51, lr: 7.08e-03 +2024-08-06 17:05:14,198 INFO [trainer.py:765] (6/8) Epoch 12, batch 2000, train_loss[loss=3.579, NarTop10Accuracy=0.5997, over 5367.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6725, over 5993.35 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 17:05:39,468 INFO [trainer.py:765] (6/8) Epoch 12, batch 2100, train_loss[loss=3.262, NarTop10Accuracy=0.6688, over 4836.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6706, over 5971.77 frames. ], batch size: 5, lr: 7.06e-03 +2024-08-06 17:06:04,691 INFO [trainer.py:765] (6/8) Epoch 12, batch 2200, train_loss[loss=3.48, NarTop10Accuracy=0.6265, over 7143.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6672, over 6025.91 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,847 INFO [trainer.py:765] (6/8) Epoch 12, batch 2300, train_loss[loss=3.516, NarTop10Accuracy=0.6303, over 5724.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6678, over 6054.37 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,200 INFO [trainer.py:765] (6/8) Epoch 12, batch 2400, train_loss[loss=3.089, NarTop10Accuracy=0.7065, over 5265.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6697, over 5790.77 frames. ], batch size: 7, lr: 7.03e-03 +2024-08-06 17:07:17,646 INFO [trainer.py:765] (6/8) Epoch 12, batch 2500, train_loss[loss=3.212, NarTop10Accuracy=0.6891, over 5055.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6739, over 5487.44 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,943 INFO [trainer.py:650] (6/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,079 INFO [trainer.py:765] (6/8) Epoch 13, batch 100, train_loss[loss=3.08, NarTop10Accuracy=0.7193, over 7236.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6691, over 2355.81 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,120 INFO [trainer.py:765] (6/8) Epoch 13, batch 200, train_loss[loss=3.104, NarTop10Accuracy=0.7145, over 6867.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.669, over 3850.93 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,277 INFO [trainer.py:765] (6/8) Epoch 13, batch 300, train_loss[loss=3.517, NarTop10Accuracy=0.6105, over 7020.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6729, over 4644.37 frames. ], batch size: 22, lr: 6.71e-03 +2024-08-06 17:10:19,163 INFO [trainer.py:765] (6/8) Epoch 13, batch 400, train_loss[loss=3.104, NarTop10Accuracy=0.7122, over 5199.00 frames. ], tot_loss[loss=3.246, NarTop10Accuracy=0.6767, over 5092.62 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,335 INFO [trainer.py:765] (6/8) Epoch 13, batch 500, train_loss[loss=3.211, NarTop10Accuracy=0.6887, over 6087.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6784, over 5377.82 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,244 INFO [trainer.py:765] (6/8) Epoch 13, batch 600, train_loss[loss=2.915, NarTop10Accuracy=0.7412, over 5637.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6794, over 5651.10 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (6/8) Epoch 13, batch 700, train_loss[loss=3.219, NarTop10Accuracy=0.6806, over 5022.00 frames. ], tot_loss[loss=3.244, NarTop10Accuracy=0.6773, over 5721.65 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,442 INFO [trainer.py:765] (6/8) Epoch 13, batch 800, train_loss[loss=3.209, NarTop10Accuracy=0.6799, over 4371.00 frames. ], tot_loss[loss=3.251, NarTop10Accuracy=0.6756, over 5773.23 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 17:13:10,031 INFO [trainer.py:765] (6/8) Epoch 13, batch 900, train_loss[loss=3.235, NarTop10Accuracy=0.6852, over 6225.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6778, over 5781.87 frames. ], batch size: 13, lr: 6.65e-03 +2024-08-06 17:13:41,442 INFO [trainer.py:765] (6/8) Epoch 13, batch 1000, train_loss[loss=3.509, NarTop10Accuracy=0.6323, over 6837.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6771, over 5884.29 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 17:14:15,537 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (6/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:14:24,471 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,696 INFO [trainer.py:765] (6/8) Epoch 13, batch 1100, train_loss[loss=3.416, NarTop10Accuracy=0.6427, over 6687.00 frames. ], tot_loss[loss=3.249, NarTop10Accuracy=0.6757, over 5916.12 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,474 INFO [trainer.py:765] (6/8) Epoch 13, batch 1200, train_loss[loss=3.331, NarTop10Accuracy=0.6582, over 7230.00 frames. ], tot_loss[loss=3.252, NarTop10Accuracy=0.675, over 5929.96 frames. 
], batch size: 31, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (6/8) Epoch 13, batch 1300, train_loss[loss=3.105, NarTop10Accuracy=0.7095, over 4413.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6735, over 6003.17 frames. ], batch size: 5, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (6/8) Epoch 13, batch 1400, train_loss[loss=3.037, NarTop10Accuracy=0.7193, over 5958.00 frames. ], tot_loss[loss=3.262, NarTop10Accuracy=0.6736, over 6037.32 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,787 INFO [trainer.py:765] (6/8) Epoch 13, batch 1500, train_loss[loss=3.486, NarTop10Accuracy=0.627, over 6429.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6738, over 5964.80 frames. ], batch size: 50, lr: 6.59e-03 +2024-08-06 17:17:07,602 INFO [trainer.py:765] (6/8) Epoch 13, batch 1600, train_loss[loss=3.136, NarTop10Accuracy=0.7049, over 7074.00 frames. ], tot_loss[loss=3.263, NarTop10Accuracy=0.6729, over 5937.57 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (6/8) Epoch 13, batch 1700, train_loss[loss=3.184, NarTop10Accuracy=0.6965, over 6261.00 frames. ], tot_loss[loss=3.262, NarTop10Accuracy=0.673, over 5928.56 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,761 INFO [trainer.py:765] (6/8) Epoch 13, batch 1800, train_loss[loss=3.174, NarTop10Accuracy=0.6983, over 7218.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6744, over 5983.36 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,243 INFO [trainer.py:765] (6/8) Epoch 13, batch 1900, train_loss[loss=3.531, NarTop10Accuracy=0.6228, over 6033.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.675, over 6019.16 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,776 INFO [trainer.py:765] (6/8) Epoch 13, batch 2000, train_loss[loss=3.565, NarTop10Accuracy=0.6157, over 6522.00 frames. ], tot_loss[loss=3.239, NarTop10Accuracy=0.6785, over 6002.45 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (6/8) Epoch 13, batch 2100, train_loss[loss=2.858, NarTop10Accuracy=0.7575, over 4725.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6782, over 6000.67 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (6/8) Epoch 13, batch 2200, train_loss[loss=3.406, NarTop10Accuracy=0.6501, over 6969.00 frames. ], tot_loss[loss=3.251, NarTop10Accuracy=0.6759, over 6035.57 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,542 INFO [trainer.py:765] (6/8) Epoch 13, batch 2300, train_loss[loss=3.363, NarTop10Accuracy=0.6393, over 5673.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6718, over 6034.54 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (6/8) Epoch 13, batch 2400, train_loss[loss=3.528, NarTop10Accuracy=0.6091, over 5163.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6771, over 5767.29 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (6/8) Epoch 13, batch 2500, train_loss[loss=3.469, NarTop10Accuracy=0.6362, over 5100.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6808, over 5467.57 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,262 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 17:22:19,316 INFO [trainer.py:765] (6/8) Epoch 14, batch 100, train_loss[loss=2.985, NarTop10Accuracy=0.7346, over 7224.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6839, over 2387.72 frames. 
], batch size: 32, lr: 6.24e-03 +2024-08-06 17:22:50,378 INFO [trainer.py:765] (6/8) Epoch 14, batch 200, train_loss[loss=3.154, NarTop10Accuracy=0.6988, over 6717.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6805, over 3869.35 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,880 INFO [trainer.py:765] (6/8) Epoch 14, batch 300, train_loss[loss=3.161, NarTop10Accuracy=0.6981, over 7071.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6848, over 4648.52 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,484 INFO [trainer.py:765] (6/8) Epoch 14, batch 400, train_loss[loss=2.883, NarTop10Accuracy=0.75, over 4983.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6813, over 5092.21 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,114 INFO [trainer.py:765] (6/8) Epoch 14, batch 500, train_loss[loss=3.292, NarTop10Accuracy=0.6697, over 6006.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.68, over 5371.47 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,214 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (6/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,276 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:24:44,823 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,914 INFO [trainer.py:765] (6/8) Epoch 14, batch 600, train_loss[loss=2.98, NarTop10Accuracy=0.73, over 5718.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6802, over 5642.42 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,548 INFO [trainer.py:765] (6/8) Epoch 14, batch 700, train_loss[loss=3.328, NarTop10Accuracy=0.6551, over 5166.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6815, over 5726.04 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,279 INFO [trainer.py:765] (6/8) Epoch 14, batch 800, train_loss[loss=2.828, NarTop10Accuracy=0.7684, over 5145.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6826, over 5782.40 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 17:26:57,659 INFO [trainer.py:765] (6/8) Epoch 14, batch 900, train_loss[loss=3.198, NarTop10Accuracy=0.6781, over 6615.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.684, over 5787.22 frames. ], batch size: 14, lr: 6.17e-03 +2024-08-06 17:27:31,716 INFO [trainer.py:765] (6/8) Epoch 14, batch 1000, train_loss[loss=3.415, NarTop10Accuracy=0.6472, over 6294.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6809, over 5866.09 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,597 INFO [trainer.py:765] (6/8) Epoch 14, batch 1100, train_loss[loss=2.972, NarTop10Accuracy=0.7405, over 6933.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6816, over 5914.50 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,733 INFO [trainer.py:765] (6/8) Epoch 14, batch 1200, train_loss[loss=3.6, NarTop10Accuracy=0.6035, over 7512.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6822, over 5919.56 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,214 INFO [trainer.py:765] (6/8) Epoch 14, batch 1300, train_loss[loss=3.566, NarTop10Accuracy=0.6119, over 5196.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6821, over 5996.29 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,602 INFO [trainer.py:765] (6/8) Epoch 14, batch 1400, train_loss[loss=3.367, NarTop10Accuracy=0.6498, over 6081.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6803, over 6015.82 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (6/8) Epoch 14, batch 1500, train_loss[loss=3.772, NarTop10Accuracy=0.5673, over 5769.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6793, over 5953.10 frames. ], batch size: 51, lr: 6.12e-03 +2024-08-06 17:30:53,042 INFO [trainer.py:765] (6/8) Epoch 14, batch 1600, train_loss[loss=3.115, NarTop10Accuracy=0.706, over 7359.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6815, over 5915.37 frames. ], batch size: 23, lr: 6.11e-03 +2024-08-06 17:31:19,728 INFO [trainer.py:765] (6/8) Epoch 14, batch 1700, train_loss[loss=2.994, NarTop10Accuracy=0.7287, over 6579.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6851, over 5891.40 frames. ], batch size: 14, lr: 6.10e-03 +2024-08-06 17:31:46,289 INFO [trainer.py:765] (6/8) Epoch 14, batch 1800, train_loss[loss=2.976, NarTop10Accuracy=0.7334, over 7191.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6892, over 5963.79 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,727 INFO [trainer.py:765] (6/8) Epoch 14, batch 1900, train_loss[loss=3.604, NarTop10Accuracy=0.5986, over 5676.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6869, over 6000.20 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,282 INFO [trainer.py:765] (6/8) Epoch 14, batch 2000, train_loss[loss=3.321, NarTop10Accuracy=0.6701, over 5919.00 frames. ], tot_loss[loss=3.214, NarTop10Accuracy=0.6834, over 5982.87 frames. ], batch size: 52, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (6/8) Epoch 14, batch 2100, train_loss[loss=2.92, NarTop10Accuracy=0.7301, over 3972.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6825, over 5963.05 frames. ], batch size: 4, lr: 6.07e-03 +2024-08-06 17:33:28,999 INFO [trainer.py:765] (6/8) Epoch 14, batch 2200, train_loss[loss=3.25, NarTop10Accuracy=0.6714, over 7371.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.683, over 5998.24 frames. ], batch size: 32, lr: 6.06e-03 +2024-08-06 17:33:54,089 INFO [trainer.py:765] (6/8) Epoch 14, batch 2300, train_loss[loss=2.782, NarTop10Accuracy=0.772, over 5766.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6791, over 6009.68 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (6/8) Epoch 14, batch 2400, train_loss[loss=2.92, NarTop10Accuracy=0.746, over 5343.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6787, over 5756.45 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (6/8) Epoch 14, batch 2500, train_loss[loss=2.927, NarTop10Accuracy=0.7444, over 5124.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6849, over 5457.46 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,395 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (6/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:34:53,679 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,535 INFO [trainer.py:650] (6/8) Reaches end of dataloader. 
+2024-08-06 17:36:11,738 INFO [trainer.py:765] (6/8) Epoch 15, batch 100, train_loss[loss=3.127, NarTop10Accuracy=0.7016, over 7464.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6802, over 2386.58 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (6/8) Epoch 15, batch 200, train_loss[loss=3.432, NarTop10Accuracy=0.6412, over 6864.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6876, over 3867.52 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (6/8) Epoch 15, batch 300, train_loss[loss=3.265, NarTop10Accuracy=0.6664, over 7263.00 frames. ], tot_loss[loss=3.204, NarTop10Accuracy=0.6852, over 4657.52 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,904 INFO [trainer.py:765] (6/8) Epoch 15, batch 400, train_loss[loss=2.942, NarTop10Accuracy=0.7454, over 5208.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6865, over 5100.50 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (6/8) Epoch 15, batch 500, train_loss[loss=2.984, NarTop10Accuracy=0.7351, over 6183.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6872, over 5402.96 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,093 INFO [trainer.py:765] (6/8) Epoch 15, batch 600, train_loss[loss=2.826, NarTop10Accuracy=0.7689, over 5613.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6844, over 5657.99 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (6/8) Epoch 15, batch 700, train_loss[loss=2.949, NarTop10Accuracy=0.7397, over 5049.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6834, over 5719.12 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,565 INFO [trainer.py:765] (6/8) Epoch 15, batch 800, train_loss[loss=3.293, NarTop10Accuracy=0.6629, over 5064.00 frames. ], tot_loss[loss=3.235, NarTop10Accuracy=0.6787, over 5778.87 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,791 INFO [trainer.py:765] (6/8) Epoch 15, batch 900, train_loss[loss=3.426, NarTop10Accuracy=0.6342, over 6183.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6831, over 5813.22 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,251 INFO [trainer.py:765] (6/8) Epoch 15, batch 1000, train_loss[loss=3.06, NarTop10Accuracy=0.7148, over 6297.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6862, over 5918.80 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 17:41:46,452 INFO [trainer.py:765] (6/8) Epoch 15, batch 1100, train_loss[loss=3.202, NarTop10Accuracy=0.6781, over 6831.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6863, over 5931.78 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (6/8) Epoch 15, batch 1200, train_loss[loss=3.315, NarTop10Accuracy=0.6598, over 7110.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.681, over 5931.46 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,428 INFO [trainer.py:765] (6/8) Epoch 15, batch 1300, train_loss[loss=3.181, NarTop10Accuracy=0.691, over 5094.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.683, over 6004.38 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (6/8) Epoch 15, batch 1400, train_loss[loss=3.365, NarTop10Accuracy=0.6563, over 6153.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.681, over 6020.79 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (6/8) Epoch 15, batch 1500, train_loss[loss=3.244, NarTop10Accuracy=0.6829, over 6252.00 frames. ], tot_loss[loss=3.226, NarTop10Accuracy=0.6808, over 5944.21 frames. ], batch size: 51, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (6/8) Epoch 15, batch 1600, train_loss[loss=3.709, NarTop10Accuracy=0.5854, over 7041.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6846, over 5934.46 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (6/8) Epoch 15, batch 1700, train_loss[loss=2.99, NarTop10Accuracy=0.7309, over 6501.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6872, over 5933.14 frames. ], batch size: 14, lr: 5.70e-03 +2024-08-06 17:45:17,294 INFO [trainer.py:765] (6/8) Epoch 15, batch 1800, train_loss[loss=3.153, NarTop10Accuracy=0.6926, over 7212.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6882, over 5987.41 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (6/8) Epoch 15, batch 1900, train_loss[loss=3.103, NarTop10Accuracy=0.7042, over 6171.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6824, over 6024.94 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,540 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:46:01,743 INFO [trainer.py:811] (6/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,371 INFO [trainer.py:765] (6/8) Epoch 15, batch 2000, train_loss[loss=3.258, NarTop10Accuracy=0.6768, over 5922.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6841, over 5992.94 frames. ], batch size: 51, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (6/8) Epoch 15, batch 2100, train_loss[loss=3.286, NarTop10Accuracy=0.6678, over 4773.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6857, over 5994.04 frames. ], batch size: 5, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (6/8) Epoch 15, batch 2200, train_loss[loss=3.089, NarTop10Accuracy=0.7126, over 7239.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6831, over 6035.03 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,291 INFO [trainer.py:765] (6/8) Epoch 15, batch 2300, train_loss[loss=3.687, NarTop10Accuracy=0.5933, over 5679.00 frames. ], tot_loss[loss=3.216, NarTop10Accuracy=0.6827, over 6044.11 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (6/8) Epoch 15, batch 2400, train_loss[loss=3.127, NarTop10Accuracy=0.6966, over 5055.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6863, over 5766.54 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,161 INFO [trainer.py:765] (6/8) Epoch 15, batch 2500, train_loss[loss=2.776, NarTop10Accuracy=0.7751, over 5097.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.691, over 5470.98 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:40,661 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 17:49:41,221 INFO [trainer.py:765] (6/8) Epoch 16, batch 100, train_loss[loss=3.571, NarTop10Accuracy=0.6129, over 7305.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6939, over 2363.35 frames. 
], batch size: 31, lr: 5.45e-03 +2024-08-06 17:50:12,158 INFO [trainer.py:765] (6/8) Epoch 16, batch 200, train_loss[loss=2.904, NarTop10Accuracy=0.7493, over 6894.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6855, over 3845.08 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,160 INFO [trainer.py:765] (6/8) Epoch 16, batch 300, train_loss[loss=3.098, NarTop10Accuracy=0.7098, over 6972.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6875, over 4641.76 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,976 INFO [trainer.py:765] (6/8) Epoch 16, batch 400, train_loss[loss=3.44, NarTop10Accuracy=0.6347, over 5175.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6872, over 5100.04 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,324 INFO [trainer.py:765] (6/8) Epoch 16, batch 500, train_loss[loss=2.984, NarTop10Accuracy=0.7338, over 6165.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6891, over 5365.77 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,252 INFO [trainer.py:765] (6/8) Epoch 16, batch 600, train_loss[loss=3.004, NarTop10Accuracy=0.7289, over 5610.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6883, over 5636.30 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,387 INFO [trainer.py:765] (6/8) Epoch 16, batch 700, train_loss[loss=2.901, NarTop10Accuracy=0.7437, over 5142.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6892, over 5707.82 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,816 INFO [trainer.py:765] (6/8) Epoch 16, batch 800, train_loss[loss=3.278, NarTop10Accuracy=0.6686, over 4998.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6894, over 5757.91 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,923 INFO [trainer.py:765] (6/8) Epoch 16, batch 900, train_loss[loss=3.399, NarTop10Accuracy=0.651, over 6264.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6915, over 5785.21 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,607 INFO [trainer.py:765] (6/8) Epoch 16, batch 1000, train_loss[loss=3.042, NarTop10Accuracy=0.7227, over 6801.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6934, over 5879.46 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:55:17,197 INFO [trainer.py:765] (6/8) Epoch 16, batch 1100, train_loss[loss=3.119, NarTop10Accuracy=0.7031, over 6771.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 5910.39 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,210 INFO [trainer.py:765] (6/8) Epoch 16, batch 1200, train_loss[loss=3.474, NarTop10Accuracy=0.6372, over 7077.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.687, over 5900.38 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,776 INFO [trainer.py:765] (6/8) Epoch 16, batch 1300, train_loss[loss=3.45, NarTop10Accuracy=0.6303, over 5064.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6878, over 5964.38 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,648 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (6/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 17:56:54,008 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (6/8) Epoch 16, batch 1400, train_loss[loss=3.061, NarTop10Accuracy=0.7149, over 5991.00 frames. ], tot_loss[loss=3.187, NarTop10Accuracy=0.6886, over 5989.22 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,034 INFO [trainer.py:765] (6/8) Epoch 16, batch 1500, train_loss[loss=3.337, NarTop10Accuracy=0.6602, over 6090.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.689, over 5935.55 frames. ], batch size: 53, lr: 5.35e-03 +2024-08-06 17:58:01,775 INFO [trainer.py:765] (6/8) Epoch 16, batch 1600, train_loss[loss=3.082, NarTop10Accuracy=0.7102, over 7080.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6892, over 5913.63 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (6/8) Epoch 16, batch 1700, train_loss[loss=2.94, NarTop10Accuracy=0.7454, over 6255.00 frames. ], tot_loss[loss=3.194, NarTop10Accuracy=0.6872, over 5909.55 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 17:58:54,976 INFO [trainer.py:765] (6/8) Epoch 16, batch 1800, train_loss[loss=3.071, NarTop10Accuracy=0.7066, over 6990.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.689, over 5969.88 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (6/8) Epoch 16, batch 1900, train_loss[loss=3.474, NarTop10Accuracy=0.6338, over 6228.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6842, over 6014.37 frames. ], batch size: 53, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (6/8) Epoch 16, batch 2000, train_loss[loss=3.13, NarTop10Accuracy=0.7014, over 6204.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6904, over 5995.28 frames. ], batch size: 51, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (6/8) Epoch 16, batch 2100, train_loss[loss=3.32, NarTop10Accuracy=0.6648, over 4818.00 frames. ], tot_loss[loss=3.21, NarTop10Accuracy=0.684, over 5975.22 frames. ], batch size: 5, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (6/8) Epoch 16, batch 2200, train_loss[loss=3.29, NarTop10Accuracy=0.6627, over 7311.00 frames. ], tot_loss[loss=3.22, NarTop10Accuracy=0.6818, over 6009.73 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,503 INFO [trainer.py:765] (6/8) Epoch 16, batch 2300, train_loss[loss=3.057, NarTop10Accuracy=0.7241, over 5682.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6821, over 6017.44 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (6/8) Epoch 16, batch 2400, train_loss[loss=3.081, NarTop10Accuracy=0.7068, over 5061.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6863, over 5755.12 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (6/8) Epoch 16, batch 2500, train_loss[loss=2.83, NarTop10Accuracy=0.757, over 5217.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6933, over 5489.18 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,329 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (6/8) Epoch 17, batch 100, train_loss[loss=3.12, NarTop10Accuracy=0.7023, over 7296.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6968, over 2357.15 frames. 
], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (6/8) Epoch 17, batch 200, train_loss[loss=3.469, NarTop10Accuracy=0.6302, over 6744.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6939, over 3871.82 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,591 INFO [trainer.py:765] (6/8) Epoch 17, batch 300, train_loss[loss=3.405, NarTop10Accuracy=0.6473, over 6936.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6903, over 4666.85 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (6/8) Epoch 17, batch 400, train_loss[loss=3.272, NarTop10Accuracy=0.6789, over 5247.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6909, over 5111.89 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,681 INFO [trainer.py:765] (6/8) Epoch 17, batch 500, train_loss[loss=2.825, NarTop10Accuracy=0.7599, over 6099.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 5379.47 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (6/8) Epoch 17, batch 600, train_loss[loss=3.037, NarTop10Accuracy=0.7263, over 5733.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6912, over 5655.50 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (6/8) Epoch 17, batch 700, train_loss[loss=3.229, NarTop10Accuracy=0.6784, over 4350.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6906, over 5708.96 frames. ], batch size: 5, lr: 5.08e-03 +2024-08-06 18:07:02,725 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (6/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,763 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 18:07:11,311 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,353 INFO [trainer.py:765] (6/8) Epoch 17, batch 800, train_loss[loss=3.091, NarTop10Accuracy=0.7107, over 4302.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 5754.40 frames. ], batch size: 5, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (6/8) Epoch 17, batch 900, train_loss[loss=3.54, NarTop10Accuracy=0.6141, over 6507.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.693, over 5793.40 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (6/8) Epoch 17, batch 1000, train_loss[loss=3.286, NarTop10Accuracy=0.6667, over 6552.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6917, over 5897.17 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (6/8) Epoch 17, batch 1100, train_loss[loss=2.987, NarTop10Accuracy=0.7313, over 6780.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6909, over 5940.43 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,745 INFO [trainer.py:765] (6/8) Epoch 17, batch 1200, train_loss[loss=3.089, NarTop10Accuracy=0.7115, over 7413.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6921, over 5927.26 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (6/8) Epoch 17, batch 1300, train_loss[loss=3.28, NarTop10Accuracy=0.6624, over 5079.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6915, over 5998.41 frames. 
], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,026 INFO [trainer.py:765] (6/8) Epoch 17, batch 1400, train_loss[loss=3.315, NarTop10Accuracy=0.664, over 6177.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6906, over 5999.05 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (6/8) Epoch 17, batch 1500, train_loss[loss=3.43, NarTop10Accuracy=0.6387, over 6267.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6914, over 5937.58 frames. ], batch size: 51, lr: 5.04e-03 +2024-08-06 18:11:46,854 INFO [trainer.py:765] (6/8) Epoch 17, batch 1600, train_loss[loss=2.967, NarTop10Accuracy=0.7268, over 7140.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6935, over 5908.71 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,508 INFO [trainer.py:765] (6/8) Epoch 17, batch 1700, train_loss[loss=3.542, NarTop10Accuracy=0.6163, over 6639.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6897, over 5909.69 frames. ], batch size: 14, lr: 5.03e-03 +2024-08-06 18:12:40,001 INFO [trainer.py:765] (6/8) Epoch 17, batch 1800, train_loss[loss=2.963, NarTop10Accuracy=0.7348, over 7134.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6883, over 5976.50 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,379 INFO [trainer.py:765] (6/8) Epoch 17, batch 1900, train_loss[loss=3.211, NarTop10Accuracy=0.689, over 6579.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6858, over 6010.71 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (6/8) Epoch 17, batch 2000, train_loss[loss=3.592, NarTop10Accuracy=0.6079, over 5832.00 frames. ], tot_loss[loss=3.18, NarTop10Accuracy=0.6899, over 6002.12 frames. ], batch size: 51, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (6/8) Epoch 17, batch 2100, train_loss[loss=2.918, NarTop10Accuracy=0.7486, over 3891.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6901, over 5967.83 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,434 INFO [trainer.py:765] (6/8) Epoch 17, batch 2200, train_loss[loss=3.024, NarTop10Accuracy=0.7274, over 7095.00 frames. ], tot_loss[loss=3.195, NarTop10Accuracy=0.6865, over 6011.05 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (6/8) Epoch 17, batch 2300, train_loss[loss=2.939, NarTop10Accuracy=0.7373, over 5643.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.689, over 6016.98 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (6/8) Epoch 17, batch 2400, train_loss[loss=2.932, NarTop10Accuracy=0.7402, over 5220.00 frames. ], tot_loss[loss=3.183, NarTop10Accuracy=0.6891, over 5780.83 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,514 INFO [trainer.py:765] (6/8) Epoch 17, batch 2500, train_loss[loss=2.898, NarTop10Accuracy=0.7519, over 5190.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6924, over 5473.16 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:54,965 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 18:16:49,907 INFO [trainer.py:765] (6/8) Epoch 18, batch 100, train_loss[loss=3.037, NarTop10Accuracy=0.7228, over 7122.00 frames. ], tot_loss[loss=3.183, NarTop10Accuracy=0.6901, over 2377.90 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (6/8) Epoch 18, batch 200, train_loss[loss=2.939, NarTop10Accuracy=0.7447, over 6759.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6937, over 3851.92 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,715 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 18:17:35,926 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 18:17:36,528 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (6/8) Epoch 18, batch 300, train_loss[loss=3.386, NarTop10Accuracy=0.6422, over 6942.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6931, over 4653.18 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (6/8) Epoch 18, batch 400, train_loss[loss=3.315, NarTop10Accuracy=0.6641, over 5046.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6951, over 5111.84 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (6/8) Epoch 18, batch 500, train_loss[loss=3.103, NarTop10Accuracy=0.7098, over 6219.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6947, over 5401.39 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (6/8) Epoch 18, batch 600, train_loss[loss=3.507, NarTop10Accuracy=0.6309, over 5745.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6947, over 5657.06 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,870 INFO [trainer.py:765] (6/8) Epoch 18, batch 700, train_loss[loss=3.284, NarTop10Accuracy=0.664, over 5115.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6926, over 5730.41 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (6/8) Epoch 18, batch 800, train_loss[loss=2.685, NarTop10Accuracy=0.7954, over 4992.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6926, over 5797.58 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,409 INFO [trainer.py:765] (6/8) Epoch 18, batch 900, train_loss[loss=2.955, NarTop10Accuracy=0.7256, over 6201.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.696, over 5822.87 frames. ], batch size: 13, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (6/8) Epoch 18, batch 1000, train_loss[loss=2.992, NarTop10Accuracy=0.7283, over 6639.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6936, over 5917.94 frames. ], batch size: 14, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (6/8) Epoch 18, batch 1100, train_loss[loss=3.4, NarTop10Accuracy=0.641, over 6771.00 frames. ], tot_loss[loss=3.166, NarTop10Accuracy=0.6927, over 5928.99 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (6/8) Epoch 18, batch 1200, train_loss[loss=3.514, NarTop10Accuracy=0.6158, over 7536.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6909, over 5920.93 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (6/8) Epoch 18, batch 1300, train_loss[loss=2.956, NarTop10Accuracy=0.7497, over 4149.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6946, over 5988.84 frames. ], batch size: 5, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (6/8) Epoch 18, batch 1400, train_loss[loss=3.018, NarTop10Accuracy=0.7189, over 6018.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6949, over 6016.93 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (6/8) Epoch 18, batch 1500, train_loss[loss=3.117, NarTop10Accuracy=0.7067, over 6060.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6948, over 5962.69 frames. ], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (6/8) Epoch 18, batch 1600, train_loss[loss=2.992, NarTop10Accuracy=0.7289, over 7029.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6934, over 5938.61 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,688 INFO [trainer.py:765] (6/8) Epoch 18, batch 1700, train_loss[loss=3.159, NarTop10Accuracy=0.6945, over 6195.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6924, over 5912.17 frames. ], batch size: 13, lr: 4.75e-03 +2024-08-06 18:26:21,197 INFO [trainer.py:765] (6/8) Epoch 18, batch 1800, train_loss[loss=3.372, NarTop10Accuracy=0.6484, over 7716.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.695, over 5975.40 frames. ], batch size: 23, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (6/8) Epoch 18, batch 1900, train_loss[loss=3.046, NarTop10Accuracy=0.7166, over 6261.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6924, over 6020.62 frames. ], batch size: 51, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (6/8) Epoch 18, batch 2000, train_loss[loss=3.063, NarTop10Accuracy=0.7181, over 6153.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6945, over 5980.48 frames. ], batch size: 51, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (6/8) Epoch 18, batch 2100, train_loss[loss=3.216, NarTop10Accuracy=0.6717, over 4923.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.6945, over 5975.77 frames. ], batch size: 5, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (6/8) Epoch 18, batch 2200, train_loss[loss=3.033, NarTop10Accuracy=0.7182, over 7482.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6943, over 6000.64 frames. ], batch size: 33, lr: 4.72e-03 +2024-08-06 18:28:06,572 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,097 INFO [trainer.py:765] (6/8) Epoch 18, batch 2300, train_loss[loss=2.937, NarTop10Accuracy=0.7478, over 5832.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6908, over 6016.09 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (6/8) Epoch 18, batch 2400, train_loss[loss=2.973, NarTop10Accuracy=0.7269, over 5277.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6949, over 5778.90 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (6/8) Epoch 18, batch 2500, train_loss[loss=2.914, NarTop10Accuracy=0.7405, over 5352.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6984, over 5502.01 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,229 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 18:30:41,232 INFO [trainer.py:765] (6/8) Epoch 19, batch 100, train_loss[loss=2.992, NarTop10Accuracy=0.7299, over 7494.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6931, over 2373.25 frames. 
], batch size: 32, lr: 4.57e-03 +2024-08-06 18:31:15,602 INFO [trainer.py:765] (6/8) Epoch 19, batch 200, train_loss[loss=3.077, NarTop10Accuracy=0.7173, over 6822.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6937, over 3858.92 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (6/8) Epoch 19, batch 300, train_loss[loss=3.475, NarTop10Accuracy=0.6322, over 7089.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6982, over 4665.50 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (6/8) Epoch 19, batch 400, train_loss[loss=3.183, NarTop10Accuracy=0.6885, over 5049.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.6984, over 5119.97 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (6/8) Epoch 19, batch 500, train_loss[loss=2.914, NarTop10Accuracy=0.7562, over 6093.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6988, over 5384.44 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (6/8) Epoch 19, batch 600, train_loss[loss=3.116, NarTop10Accuracy=0.7109, over 5616.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6973, over 5642.15 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (6/8) Epoch 19, batch 700, train_loss[loss=2.805, NarTop10Accuracy=0.7628, over 5166.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6968, over 5720.97 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (6/8) Epoch 19, batch 800, train_loss[loss=3.279, NarTop10Accuracy=0.668, over 4989.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6953, over 5788.65 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (6/8) Epoch 19, batch 900, train_loss[loss=2.866, NarTop10Accuracy=0.7568, over 6273.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 5810.34 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 18:35:48,637 INFO [trainer.py:765] (6/8) Epoch 19, batch 1000, train_loss[loss=3.398, NarTop10Accuracy=0.6473, over 6705.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6964, over 5906.78 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 18:36:20,938 INFO [trainer.py:765] (6/8) Epoch 19, batch 1100, train_loss[loss=2.953, NarTop10Accuracy=0.7412, over 6843.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6942, over 5938.44 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (6/8) Epoch 19, batch 1200, train_loss[loss=3.008, NarTop10Accuracy=0.7318, over 7167.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6932, over 5937.40 frames. ], batch size: 31, lr: 4.52e-03 +2024-08-06 18:37:35,314 INFO [trainer.py:765] (6/8) Epoch 19, batch 1300, train_loss[loss=2.845, NarTop10Accuracy=0.7566, over 5076.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6917, over 5996.96 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 18:38:04,679 INFO [trainer.py:765] (6/8) Epoch 19, batch 1400, train_loss[loss=2.871, NarTop10Accuracy=0.7545, over 6000.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6915, over 6007.85 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,550 INFO [trainer.py:765] (6/8) Epoch 19, batch 1500, train_loss[loss=3.341, NarTop10Accuracy=0.6586, over 6039.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6963, over 5950.58 frames. 
], batch size: 50, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (6/8) Epoch 19, batch 1600, train_loss[loss=3.522, NarTop10Accuracy=0.6199, over 7044.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6964, over 5936.85 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,590 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (6/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,796 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 18:39:20,379 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (6/8) Epoch 19, batch 1700, train_loss[loss=3.421, NarTop10Accuracy=0.6373, over 6249.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6961, over 5905.83 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (6/8) Epoch 19, batch 1800, train_loss[loss=3.568, NarTop10Accuracy=0.6177, over 7104.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6968, over 5967.31 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (6/8) Epoch 19, batch 1900, train_loss[loss=3.133, NarTop10Accuracy=0.7008, over 6438.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6964, over 6019.44 frames. ], batch size: 50, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (6/8) Epoch 19, batch 2000, train_loss[loss=3.276, NarTop10Accuracy=0.672, over 5853.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6965, over 6001.39 frames. ], batch size: 51, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (6/8) Epoch 19, batch 2100, train_loss[loss=2.791, NarTop10Accuracy=0.7735, over 3969.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.6984, over 5965.49 frames. ], batch size: 4, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (6/8) Epoch 19, batch 2200, train_loss[loss=3.169, NarTop10Accuracy=0.689, over 6984.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6963, over 6009.46 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (6/8) Epoch 19, batch 2300, train_loss[loss=3.288, NarTop10Accuracy=0.6512, over 5721.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6928, over 6003.71 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,988 INFO [trainer.py:765] (6/8) Epoch 19, batch 2400, train_loss[loss=2.954, NarTop10Accuracy=0.7309, over 5109.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6978, over 5762.13 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,690 INFO [trainer.py:765] (6/8) Epoch 19, batch 2500, train_loss[loss=2.972, NarTop10Accuracy=0.7297, over 5106.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7019, over 5471.70 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,816 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 18:44:22,973 INFO [trainer.py:765] (6/8) Epoch 20, batch 100, train_loss[loss=3.256, NarTop10Accuracy=0.6703, over 7434.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6934, over 2366.08 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (6/8) Epoch 20, batch 200, train_loss[loss=3.48, NarTop10Accuracy=0.6196, over 6786.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7019, over 3866.86 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,278 INFO [trainer.py:765] (6/8) Epoch 20, batch 300, train_loss[loss=3.384, NarTop10Accuracy=0.6453, over 7200.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7034, over 4670.06 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (6/8) Epoch 20, batch 400, train_loss[loss=2.819, NarTop10Accuracy=0.7627, over 5745.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7028, over 5120.23 frames. ], batch size: 8, lr: 4.32e-03 +2024-08-06 18:46:35,769 INFO [trainer.py:765] (6/8) Epoch 20, batch 500, train_loss[loss=2.883, NarTop10Accuracy=0.7598, over 6192.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7004, over 5388.71 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (6/8) Epoch 20, batch 600, train_loss[loss=2.819, NarTop10Accuracy=0.7485, over 5727.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.7, over 5641.06 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (6/8) Epoch 20, batch 700, train_loss[loss=2.745, NarTop10Accuracy=0.7775, over 5067.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7022, over 5711.15 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (6/8) Epoch 20, batch 800, train_loss[loss=2.809, NarTop10Accuracy=0.7644, over 5196.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7, over 5775.10 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:56,534 INFO [trainer.py:765] (6/8) Epoch 20, batch 900, train_loss[loss=2.965, NarTop10Accuracy=0.7403, over 6315.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7006, over 5793.62 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (6/8) Epoch 20, batch 1000, train_loss[loss=3.107, NarTop10Accuracy=0.6938, over 6162.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6966, over 5889.08 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:52,236 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 18:50:00,327 INFO [trainer.py:811] (6/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30248MB +2024-08-06 18:50:00,874 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,426 INFO [trainer.py:765] (6/8) Epoch 20, batch 1100, train_loss[loss=3.255, NarTop10Accuracy=0.6796, over 6843.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6967, over 5928.68 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,775 INFO [trainer.py:765] (6/8) Epoch 20, batch 1200, train_loss[loss=3.003, NarTop10Accuracy=0.7334, over 7503.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6956, over 5912.11 frames. ], batch size: 32, lr: 4.29e-03 +2024-08-06 18:51:25,129 INFO [trainer.py:765] (6/8) Epoch 20, batch 1300, train_loss[loss=3.253, NarTop10Accuracy=0.6703, over 4959.00 frames. ], tot_loss[loss=3.136, NarTop10Accuracy=0.698, over 5980.43 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,314 INFO [trainer.py:765] (6/8) Epoch 20, batch 1400, train_loss[loss=3.194, NarTop10Accuracy=0.6938, over 6048.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6995, over 6006.34 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,805 INFO [trainer.py:765] (6/8) Epoch 20, batch 1500, train_loss[loss=3.297, NarTop10Accuracy=0.6705, over 6219.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6973, over 5955.64 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (6/8) Epoch 20, batch 1600, train_loss[loss=2.88, NarTop10Accuracy=0.7536, over 6942.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6965, over 5937.66 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,327 INFO [trainer.py:765] (6/8) Epoch 20, batch 1700, train_loss[loss=3.494, NarTop10Accuracy=0.6395, over 6303.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6978, over 5924.57 frames. ], batch size: 13, lr: 4.27e-03 +2024-08-06 18:53:53,850 INFO [trainer.py:765] (6/8) Epoch 20, batch 1800, train_loss[loss=3.117, NarTop10Accuracy=0.7079, over 7053.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6987, over 5979.26 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (6/8) Epoch 20, batch 1900, train_loss[loss=3.103, NarTop10Accuracy=0.7039, over 6528.00 frames. ], tot_loss[loss=3.154, NarTop10Accuracy=0.6947, over 6030.82 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (6/8) Epoch 20, batch 2000, train_loss[loss=3.664, NarTop10Accuracy=0.5939, over 5913.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6941, over 5994.45 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (6/8) Epoch 20, batch 2100, train_loss[loss=3.269, NarTop10Accuracy=0.6664, over 4689.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6952, over 5981.12 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (6/8) Epoch 20, batch 2200, train_loss[loss=2.942, NarTop10Accuracy=0.749, over 7389.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6953, over 6031.23 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,635 INFO [trainer.py:765] (6/8) Epoch 20, batch 2300, train_loss[loss=3.22, NarTop10Accuracy=0.6881, over 5742.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6929, over 6043.12 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,049 INFO [trainer.py:765] (6/8) Epoch 20, batch 2400, train_loss[loss=2.964, NarTop10Accuracy=0.7405, over 5217.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6952, over 5808.44 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,565 INFO [trainer.py:765] (6/8) Epoch 20, batch 2500, train_loss[loss=2.838, NarTop10Accuracy=0.7625, over 5166.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7032, over 5511.09 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,470 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (6/8) Epoch 21, batch 100, train_loss[loss=3.221, NarTop10Accuracy=0.6762, over 7203.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7042, over 2369.48 frames. ], batch size: 31, lr: 4.13e-03 +2024-08-06 18:58:40,418 INFO [trainer.py:765] (6/8) Epoch 21, batch 200, train_loss[loss=2.884, NarTop10Accuracy=0.7529, over 6789.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7007, over 3864.59 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,333 INFO [trainer.py:765] (6/8) Epoch 21, batch 300, train_loss[loss=2.918, NarTop10Accuracy=0.7531, over 7134.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6995, over 4656.80 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (6/8) Epoch 21, batch 400, train_loss[loss=2.841, NarTop10Accuracy=0.7587, over 5127.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7028, over 5113.81 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,840 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (6/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,890 INFO [trainer.py:765] (6/8) Epoch 21, batch 500, train_loss[loss=2.841, NarTop10Accuracy=0.7647, over 6039.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7017, over 5377.83 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,328 INFO [trainer.py:765] (6/8) Epoch 21, batch 600, train_loss[loss=3.32, NarTop10Accuracy=0.6553, over 5715.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7051, over 5646.25 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (6/8) Epoch 21, batch 700, train_loss[loss=2.75, NarTop10Accuracy=0.7799, over 4326.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7029, over 5709.10 frames. ], batch size: 5, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (6/8) Epoch 21, batch 800, train_loss[loss=2.891, NarTop10Accuracy=0.7406, over 5163.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7006, over 5789.42 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (6/8) Epoch 21, batch 900, train_loss[loss=3.034, NarTop10Accuracy=0.721, over 6327.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7002, over 5789.98 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 19:03:25,801 INFO [trainer.py:765] (6/8) Epoch 21, batch 1000, train_loss[loss=2.902, NarTop10Accuracy=0.7489, over 6855.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6985, over 5895.25 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:04:07,206 INFO [trainer.py:765] (6/8) Epoch 21, batch 1100, train_loss[loss=3.507, NarTop10Accuracy=0.6113, over 6732.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6953, over 5932.83 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,462 INFO [trainer.py:765] (6/8) Epoch 21, batch 1200, train_loss[loss=3.206, NarTop10Accuracy=0.6817, over 7077.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6986, over 5902.83 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,315 INFO [trainer.py:765] (6/8) Epoch 21, batch 1300, train_loss[loss=3.025, NarTop10Accuracy=0.7232, over 4293.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7026, over 5981.63 frames. ], batch size: 5, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (6/8) Epoch 21, batch 1400, train_loss[loss=3.439, NarTop10Accuracy=0.6368, over 6231.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7026, over 6003.08 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,599 INFO [trainer.py:765] (6/8) Epoch 21, batch 1500, train_loss[loss=3.308, NarTop10Accuracy=0.665, over 5868.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6998, over 5934.08 frames. 
], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,461 INFO [trainer.py:765] (6/8) Epoch 21, batch 1600, train_loss[loss=2.933, NarTop10Accuracy=0.7315, over 6888.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7005, over 5945.46 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,211 INFO [trainer.py:765] (6/8) Epoch 21, batch 1700, train_loss[loss=3.191, NarTop10Accuracy=0.6856, over 6258.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6992, over 5918.00 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (6/8) Epoch 21, batch 1800, train_loss[loss=2.96, NarTop10Accuracy=0.7417, over 7203.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7002, over 5963.12 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (6/8) Epoch 21, batch 1900, train_loss[loss=3.653, NarTop10Accuracy=0.5929, over 5943.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6967, over 6013.34 frames. ], batch size: 50, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (6/8) Epoch 21, batch 2000, train_loss[loss=3.507, NarTop10Accuracy=0.6178, over 6840.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6983, over 6007.30 frames. ], batch size: 51, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (6/8) Epoch 21, batch 2100, train_loss[loss=2.751, NarTop10Accuracy=0.7796, over 3954.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6958, over 5981.18 frames. ], batch size: 4, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (6/8) Epoch 21, batch 2200, train_loss[loss=2.905, NarTop10Accuracy=0.7455, over 7308.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6953, over 6019.37 frames. ], batch size: 32, lr: 4.04e-03 +2024-08-06 19:09:53,222 INFO [trainer.py:765] (6/8) Epoch 21, batch 2300, train_loss[loss=3.058, NarTop10Accuracy=0.7124, over 5850.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6927, over 6029.61 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,596 INFO [trainer.py:765] (6/8) Epoch 21, batch 2400, train_loss[loss=3.417, NarTop10Accuracy=0.633, over 5202.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6964, over 5785.28 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,229 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (6/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,276 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (6/8) Epoch 21, batch 2500, train_loss[loss=3.137, NarTop10Accuracy=0.6875, over 5157.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7056, over 5495.05 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:08,869 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 19:12:09,054 INFO [trainer.py:765] (6/8) Epoch 22, batch 100, train_loss[loss=2.968, NarTop10Accuracy=0.7378, over 7536.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7105, over 2355.65 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (6/8) Epoch 22, batch 200, train_loss[loss=3.106, NarTop10Accuracy=0.6982, over 6960.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7071, over 3859.39 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (6/8) Epoch 22, batch 300, train_loss[loss=2.888, NarTop10Accuracy=0.7576, over 7092.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7082, over 4657.63 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (6/8) Epoch 22, batch 400, train_loss[loss=3.085, NarTop10Accuracy=0.7161, over 5052.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7086, over 5117.11 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (6/8) Epoch 22, batch 500, train_loss[loss=3.141, NarTop10Accuracy=0.6868, over 6048.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7077, over 5380.52 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (6/8) Epoch 22, batch 600, train_loss[loss=3.003, NarTop10Accuracy=0.7324, over 5874.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.701, over 5660.87 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (6/8) Epoch 22, batch 700, train_loss[loss=3.167, NarTop10Accuracy=0.6868, over 5178.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7023, over 5734.64 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:10,664 INFO [trainer.py:765] (6/8) Epoch 22, batch 800, train_loss[loss=3.021, NarTop10Accuracy=0.722, over 4983.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7029, over 5782.33 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (6/8) Epoch 22, batch 900, train_loss[loss=2.9, NarTop10Accuracy=0.7431, over 6639.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7027, over 5796.63 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (6/8) Epoch 22, batch 1000, train_loss[loss=3.164, NarTop10Accuracy=0.6866, over 6144.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7042, over 5913.07 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (6/8) Epoch 22, batch 1100, train_loss[loss=2.944, NarTop10Accuracy=0.7363, over 6699.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7023, over 5934.53 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,927 INFO [trainer.py:765] (6/8) Epoch 22, batch 1200, train_loss[loss=2.967, NarTop10Accuracy=0.7328, over 7182.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7061, over 5918.99 frames. ], batch size: 32, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (6/8) Epoch 22, batch 1300, train_loss[loss=2.799, NarTop10Accuracy=0.7619, over 4992.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7073, over 5970.94 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (6/8) Epoch 22, batch 1400, train_loss[loss=2.789, NarTop10Accuracy=0.7728, over 6021.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7047, over 6006.22 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (6/8) Epoch 22, batch 1500, train_loss[loss=3.475, NarTop10Accuracy=0.6293, over 6225.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7047, over 5933.93 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,647 INFO [trainer.py:765] (6/8) Epoch 22, batch 1600, train_loss[loss=3.112, NarTop10Accuracy=0.7034, over 7254.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7012, over 5921.12 frames. 
], batch size: 23, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (6/8) Epoch 22, batch 1700, train_loss[loss=3.171, NarTop10Accuracy=0.691, over 6609.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7006, over 5931.48 frames. ], batch size: 14, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (6/8) Epoch 22, batch 1800, train_loss[loss=2.886, NarTop10Accuracy=0.7446, over 7221.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7022, over 5978.93 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (6/8) Epoch 22, batch 1900, train_loss[loss=3.112, NarTop10Accuracy=0.7077, over 6126.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6976, over 6025.93 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,110 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (6/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,819 INFO [trainer.py:765] (6/8) Epoch 22, batch 2000, train_loss[loss=3.484, NarTop10Accuracy=0.628, over 6087.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7024, over 6005.83 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (6/8) Epoch 22, batch 2100, train_loss[loss=3.193, NarTop10Accuracy=0.6837, over 3828.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7038, over 5976.59 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 19:23:15,230 INFO [trainer.py:765] (6/8) Epoch 22, batch 2200, train_loss[loss=3.036, NarTop10Accuracy=0.7217, over 7170.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7038, over 6010.03 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,315 INFO [trainer.py:765] (6/8) Epoch 22, batch 2300, train_loss[loss=3.006, NarTop10Accuracy=0.7291, over 5658.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.699, over 6025.40 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (6/8) Epoch 22, batch 2400, train_loss[loss=3.025, NarTop10Accuracy=0.7144, over 5094.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7015, over 5776.49 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (6/8) Epoch 22, batch 2500, train_loss[loss=3.181, NarTop10Accuracy=0.6918, over 5025.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7055, over 5483.72 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,714 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (6/8) Epoch 23, batch 100, train_loss[loss=3.065, NarTop10Accuracy=0.7105, over 7437.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6987, over 2376.10 frames. ], batch size: 32, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (6/8) Epoch 23, batch 200, train_loss[loss=3.46, NarTop10Accuracy=0.6328, over 6660.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7002, over 3863.61 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (6/8) Epoch 23, batch 300, train_loss[loss=3.011, NarTop10Accuracy=0.7241, over 7389.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.705, over 4692.65 frames. 
], batch size: 22, lr: 3.75e-03 +2024-08-06 19:27:26,541 INFO [trainer.py:765] (6/8) Epoch 23, batch 400, train_loss[loss=3.321, NarTop10Accuracy=0.6573, over 5214.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7039, over 5128.74 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,713 INFO [trainer.py:765] (6/8) Epoch 23, batch 500, train_loss[loss=3.369, NarTop10Accuracy=0.6552, over 6099.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7017, over 5400.72 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,883 INFO [trainer.py:765] (6/8) Epoch 23, batch 600, train_loss[loss=3.283, NarTop10Accuracy=0.6672, over 5697.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7034, over 5652.32 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (6/8) Epoch 23, batch 700, train_loss[loss=3.114, NarTop10Accuracy=0.699, over 5148.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7069, over 5738.03 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (6/8) Epoch 23, batch 800, train_loss[loss=2.861, NarTop10Accuracy=0.7587, over 5043.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7054, over 5776.26 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (6/8) Epoch 23, batch 900, train_loss[loss=3.361, NarTop10Accuracy=0.6574, over 6243.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7069, over 5781.48 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (6/8) Epoch 23, batch 1000, train_loss[loss=3.058, NarTop10Accuracy=0.7115, over 6228.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7088, over 5899.85 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:31:31,521 INFO [trainer.py:765] (6/8) Epoch 23, batch 1100, train_loss[loss=3.078, NarTop10Accuracy=0.7176, over 6900.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7065, over 5916.46 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (6/8) Epoch 23, batch 1200, train_loss[loss=3.037, NarTop10Accuracy=0.7267, over 7197.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7049, over 5902.05 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (6/8) Epoch 23, batch 1300, train_loss[loss=3.121, NarTop10Accuracy=0.7074, over 4344.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 5975.22 frames. ], batch size: 5, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (6/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,407 INFO [trainer.py:765] (6/8) Epoch 23, batch 1400, train_loss[loss=2.867, NarTop10Accuracy=0.7441, over 6027.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7038, over 6000.25 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,215 INFO [trainer.py:765] (6/8) Epoch 23, batch 1500, train_loss[loss=3.299, NarTop10Accuracy=0.6714, over 5784.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7066, over 5941.64 frames. 
], batch size: 50, lr: 3.71e-03 +2024-08-06 19:34:26,015 INFO [trainer.py:765] (6/8) Epoch 23, batch 1600, train_loss[loss=2.945, NarTop10Accuracy=0.7356, over 7281.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7051, over 5925.40 frames. ], batch size: 23, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (6/8) Epoch 23, batch 1700, train_loss[loss=3.352, NarTop10Accuracy=0.6497, over 6612.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7005, over 5910.92 frames. ], batch size: 14, lr: 3.71e-03 +2024-08-06 19:35:19,261 INFO [trainer.py:765] (6/8) Epoch 23, batch 1800, train_loss[loss=2.932, NarTop10Accuracy=0.7414, over 6924.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7019, over 5973.38 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,596 INFO [trainer.py:765] (6/8) Epoch 23, batch 1900, train_loss[loss=3.478, NarTop10Accuracy=0.6306, over 6060.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7008, over 6025.66 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,170 INFO [trainer.py:765] (6/8) Epoch 23, batch 2000, train_loss[loss=3.532, NarTop10Accuracy=0.6103, over 5874.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7028, over 6007.95 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,517 INFO [trainer.py:765] (6/8) Epoch 23, batch 2100, train_loss[loss=3.432, NarTop10Accuracy=0.6384, over 4764.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7027, over 5995.10 frames. ], batch size: 5, lr: 3.69e-03 +2024-08-06 19:37:01,908 INFO [trainer.py:765] (6/8) Epoch 23, batch 2200, train_loss[loss=3.184, NarTop10Accuracy=0.6913, over 7350.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7009, over 6026.16 frames. ], batch size: 32, lr: 3.69e-03 +2024-08-06 19:37:27,060 INFO [trainer.py:765] (6/8) Epoch 23, batch 2300, train_loss[loss=2.945, NarTop10Accuracy=0.7374, over 5514.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7019, over 6035.08 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,423 INFO [trainer.py:765] (6/8) Epoch 23, batch 2400, train_loss[loss=2.996, NarTop10Accuracy=0.7247, over 5073.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7022, over 5784.28 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,052 INFO [trainer.py:765] (6/8) Epoch 23, batch 2500, train_loss[loss=3.309, NarTop10Accuracy=0.657, over 5085.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7064, over 5480.23 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:34,908 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 19:39:37,631 INFO [trainer.py:765] (6/8) Epoch 24, batch 100, train_loss[loss=3.55, NarTop10Accuracy=0.6207, over 7167.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7002, over 2344.29 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,189 INFO [trainer.py:765] (6/8) Epoch 24, batch 200, train_loss[loss=2.893, NarTop10Accuracy=0.7558, over 6852.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7063, over 3839.35 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,555 INFO [trainer.py:765] (6/8) Epoch 24, batch 300, train_loss[loss=2.824, NarTop10Accuracy=0.7598, over 6777.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7051, over 4639.04 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,233 INFO [trainer.py:765] (6/8) Epoch 24, batch 400, train_loss[loss=3.009, NarTop10Accuracy=0.7219, over 5181.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7063, over 5077.06 frames. 
], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (6/8) Epoch 24, batch 500, train_loss[loss=2.911, NarTop10Accuracy=0.7343, over 6105.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.708, over 5369.99 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,451 INFO [trainer.py:765] (6/8) Epoch 24, batch 600, train_loss[loss=2.81, NarTop10Accuracy=0.7749, over 5664.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7073, over 5633.81 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (6/8) Epoch 24, batch 700, train_loss[loss=2.848, NarTop10Accuracy=0.7608, over 5109.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7068, over 5708.99 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,380 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (6/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:43:28,562 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,813 INFO [trainer.py:765] (6/8) Epoch 24, batch 800, train_loss[loss=2.784, NarTop10Accuracy=0.7652, over 5073.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 5782.14 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,409 INFO [trainer.py:765] (6/8) Epoch 24, batch 900, train_loss[loss=2.901, NarTop10Accuracy=0.7487, over 6201.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 5800.73 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 19:44:47,488 INFO [trainer.py:765] (6/8) Epoch 24, batch 1000, train_loss[loss=3.231, NarTop10Accuracy=0.6835, over 6654.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 5902.13 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:45:27,106 INFO [trainer.py:765] (6/8) Epoch 24, batch 1100, train_loss[loss=3.5, NarTop10Accuracy=0.6178, over 6636.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7047, over 5929.10 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,436 INFO [trainer.py:765] (6/8) Epoch 24, batch 1200, train_loss[loss=3.144, NarTop10Accuracy=0.6946, over 7164.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7051, over 5928.26 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,293 INFO [trainer.py:765] (6/8) Epoch 24, batch 1300, train_loss[loss=3.383, NarTop10Accuracy=0.6455, over 5232.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7059, over 6000.25 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,858 INFO [trainer.py:765] (6/8) Epoch 24, batch 1400, train_loss[loss=3.303, NarTop10Accuracy=0.6679, over 5907.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7034, over 6015.10 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,956 INFO [trainer.py:765] (6/8) Epoch 24, batch 1500, train_loss[loss=3.435, NarTop10Accuracy=0.6376, over 6624.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7008, over 5954.94 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,675 INFO [trainer.py:765] (6/8) Epoch 24, batch 1600, train_loss[loss=3.345, NarTop10Accuracy=0.6473, over 7020.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7008, over 5937.17 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,266 INFO [trainer.py:765] (6/8) Epoch 24, batch 1700, train_loss[loss=2.919, NarTop10Accuracy=0.746, over 6276.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7002, over 5911.97 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 19:49:01,636 INFO [trainer.py:765] (6/8) Epoch 24, batch 1800, train_loss[loss=2.996, NarTop10Accuracy=0.726, over 7215.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6984, over 5981.81 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,041 INFO [trainer.py:765] (6/8) Epoch 24, batch 1900, train_loss[loss=3.566, NarTop10Accuracy=0.6087, over 5841.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6964, over 6030.17 frames. ], batch size: 50, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (6/8) Epoch 24, batch 2000, train_loss[loss=3.572, NarTop10Accuracy=0.6081, over 6033.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.7016, over 5999.19 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 19:50:18,819 INFO [trainer.py:765] (6/8) Epoch 24, batch 2100, train_loss[loss=2.86, NarTop10Accuracy=0.7567, over 4911.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7023, over 5985.78 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 19:50:43,942 INFO [trainer.py:765] (6/8) Epoch 24, batch 2200, train_loss[loss=3.435, NarTop10Accuracy=0.6321, over 7095.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7029, over 6015.62 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (6/8) Epoch 24, batch 2300, train_loss[loss=2.936, NarTop10Accuracy=0.7392, over 5805.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7032, over 6026.72 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,349 INFO [trainer.py:765] (6/8) Epoch 24, batch 2400, train_loss[loss=2.918, NarTop10Accuracy=0.7286, over 5094.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7058, over 5774.79 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,782 INFO [trainer.py:765] (6/8) Epoch 24, batch 2500, train_loss[loss=2.932, NarTop10Accuracy=0.745, over 5250.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 5457.17 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:17,335 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 19:53:22,197 INFO [trainer.py:765] (6/8) Epoch 25, batch 100, train_loss[loss=3.369, NarTop10Accuracy=0.6484, over 7113.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7093, over 2372.88 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,262 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (6/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,177 INFO [trainer.py:765] (6/8) Epoch 25, batch 200, train_loss[loss=2.785, NarTop10Accuracy=0.7664, over 6879.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 3848.76 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,647 INFO [trainer.py:765] (6/8) Epoch 25, batch 300, train_loss[loss=3.232, NarTop10Accuracy=0.6806, over 7137.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7093, over 4650.33 frames. 
], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (6/8) Epoch 25, batch 400, train_loss[loss=2.932, NarTop10Accuracy=0.7453, over 5034.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7081, over 5123.77 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,738 INFO [trainer.py:765] (6/8) Epoch 25, batch 500, train_loss[loss=2.844, NarTop10Accuracy=0.7591, over 6096.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7092, over 5399.63 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,814 INFO [trainer.py:765] (6/8) Epoch 25, batch 600, train_loss[loss=2.943, NarTop10Accuracy=0.7421, over 5739.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7081, over 5667.96 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,496 INFO [trainer.py:765] (6/8) Epoch 25, batch 700, train_loss[loss=2.753, NarTop10Accuracy=0.7794, over 5085.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7095, over 5743.59 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,136 INFO [trainer.py:765] (6/8) Epoch 25, batch 800, train_loss[loss=3.017, NarTop10Accuracy=0.7275, over 4983.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7083, over 5793.21 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,679 INFO [trainer.py:765] (6/8) Epoch 25, batch 900, train_loss[loss=3.096, NarTop10Accuracy=0.7174, over 6174.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.711, over 5814.12 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:58:37,640 INFO [trainer.py:765] (6/8) Epoch 25, batch 1000, train_loss[loss=2.793, NarTop10Accuracy=0.7697, over 6408.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7074, over 5915.86 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 19:59:14,855 INFO [trainer.py:765] (6/8) Epoch 25, batch 1100, train_loss[loss=3.379, NarTop10Accuracy=0.642, over 6906.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7061, over 5955.72 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,039 INFO [trainer.py:765] (6/8) Epoch 25, batch 1200, train_loss[loss=3.483, NarTop10Accuracy=0.6283, over 7362.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7067, over 5943.48 frames. ], batch size: 32, lr: 3.42e-03 +2024-08-06 20:00:25,598 INFO [trainer.py:765] (6/8) Epoch 25, batch 1300, train_loss[loss=2.817, NarTop10Accuracy=0.7559, over 4995.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7088, over 6010.06 frames. ], batch size: 6, lr: 3.42e-03 +2024-08-06 20:01:02,016 INFO [trainer.py:765] (6/8) Epoch 25, batch 1400, train_loss[loss=3.004, NarTop10Accuracy=0.727, over 6189.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7089, over 6035.26 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (6/8) Epoch 25, batch 1500, train_loss[loss=3.267, NarTop10Accuracy=0.6664, over 6207.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 5977.65 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,625 INFO [trainer.py:765] (6/8) Epoch 25, batch 1600, train_loss[loss=2.911, NarTop10Accuracy=0.7489, over 7026.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 5934.27 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,360 INFO [trainer.py:765] (6/8) Epoch 25, batch 1700, train_loss[loss=3.105, NarTop10Accuracy=0.7037, over 6525.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 5912.70 frames. 
], batch size: 14, lr: 3.41e-03 +2024-08-06 20:02:53,854 INFO [trainer.py:765] (6/8) Epoch 25, batch 1800, train_loss[loss=3.302, NarTop10Accuracy=0.6596, over 6927.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.705, over 5986.47 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,340 INFO [trainer.py:765] (6/8) Epoch 25, batch 1900, train_loss[loss=3.309, NarTop10Accuracy=0.6691, over 6057.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.702, over 6024.94 frames. ], batch size: 51, lr: 3.40e-03 +2024-08-06 20:03:45,935 INFO [trainer.py:765] (6/8) Epoch 25, batch 2000, train_loss[loss=3.571, NarTop10Accuracy=0.6123, over 5847.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7006, over 5996.92 frames. ], batch size: 53, lr: 3.40e-03 +2024-08-06 20:04:11,245 INFO [trainer.py:765] (6/8) Epoch 25, batch 2100, train_loss[loss=2.719, NarTop10Accuracy=0.7859, over 4800.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7041, over 5969.48 frames. ], batch size: 5, lr: 3.40e-03 +2024-08-06 20:04:31,409 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (6/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,513 INFO [trainer.py:765] (6/8) Epoch 25, batch 2200, train_loss[loss=3.283, NarTop10Accuracy=0.6682, over 7047.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7028, over 6000.93 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (6/8) Epoch 25, batch 2300, train_loss[loss=3.074, NarTop10Accuracy=0.7043, over 5775.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7031, over 6016.67 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (6/8) Epoch 25, batch 2400, train_loss[loss=2.857, NarTop10Accuracy=0.7551, over 5184.00 frames. ], tot_loss[loss=3.104, NarTop10Accuracy=0.7048, over 5797.11 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,846 INFO [trainer.py:765] (6/8) Epoch 25, batch 2500, train_loss[loss=2.834, NarTop10Accuracy=0.7526, over 5598.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7111, over 5482.68 frames. ], batch size: 8, lr: 3.39e-03 +2024-08-06 20:06:17,612 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 20:07:19,305 INFO [trainer.py:765] (6/8) Epoch 26, batch 100, train_loss[loss=3.051, NarTop10Accuracy=0.716, over 7404.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7097, over 2364.35 frames. ], batch size: 32, lr: 3.32e-03 +2024-08-06 20:07:52,383 INFO [trainer.py:765] (6/8) Epoch 26, batch 200, train_loss[loss=2.884, NarTop10Accuracy=0.7564, over 6735.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7073, over 3860.68 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,734 INFO [trainer.py:765] (6/8) Epoch 26, batch 300, train_loss[loss=3.004, NarTop10Accuracy=0.7263, over 7062.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7077, over 4640.59 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (6/8) Epoch 26, batch 400, train_loss[loss=2.803, NarTop10Accuracy=0.7551, over 5052.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 5071.95 frames. 
], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,147 INFO [trainer.py:765] (6/8) Epoch 26, batch 500, train_loss[loss=2.781, NarTop10Accuracy=0.7788, over 6111.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7062, over 5360.39 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,891 INFO [trainer.py:765] (6/8) Epoch 26, batch 600, train_loss[loss=2.692, NarTop10Accuracy=0.7857, over 5709.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7122, over 5634.89 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 20:10:39,872 INFO [trainer.py:765] (6/8) Epoch 26, batch 700, train_loss[loss=3.177, NarTop10Accuracy=0.6877, over 4305.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7088, over 5717.02 frames. ], batch size: 5, lr: 3.30e-03 +2024-08-06 20:11:19,061 INFO [trainer.py:765] (6/8) Epoch 26, batch 800, train_loss[loss=3.086, NarTop10Accuracy=0.7135, over 5043.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7091, over 5768.77 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,315 INFO [trainer.py:765] (6/8) Epoch 26, batch 900, train_loss[loss=2.804, NarTop10Accuracy=0.7743, over 6657.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7097, over 5783.55 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:12:25,973 INFO [trainer.py:765] (6/8) Epoch 26, batch 1000, train_loss[loss=2.812, NarTop10Accuracy=0.7606, over 6588.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7079, over 5884.72 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:13:06,377 INFO [trainer.py:765] (6/8) Epoch 26, batch 1100, train_loss[loss=3.404, NarTop10Accuracy=0.6386, over 6837.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7063, over 5911.62 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,536 INFO [trainer.py:765] (6/8) Epoch 26, batch 1200, train_loss[loss=3.292, NarTop10Accuracy=0.6631, over 7479.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7087, over 5910.80 frames. ], batch size: 32, lr: 3.29e-03 +2024-08-06 20:14:13,696 INFO [trainer.py:765] (6/8) Epoch 26, batch 1300, train_loss[loss=2.793, NarTop10Accuracy=0.7707, over 4239.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7092, over 5984.59 frames. ], batch size: 5, lr: 3.28e-03 +2024-08-06 20:14:50,538 INFO [trainer.py:765] (6/8) Epoch 26, batch 1400, train_loss[loss=2.882, NarTop10Accuracy=0.7489, over 6081.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 5992.57 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,155 INFO [trainer.py:765] (6/8) Epoch 26, batch 1500, train_loss[loss=3.127, NarTop10Accuracy=0.6934, over 5805.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 5934.99 frames. ], batch size: 51, lr: 3.28e-03 +2024-08-06 20:15:48,980 INFO [trainer.py:765] (6/8) Epoch 26, batch 1600, train_loss[loss=2.89, NarTop10Accuracy=0.7496, over 6867.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.71, over 5938.37 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,002 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (6/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,240 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,952 INFO [trainer.py:765] (6/8) Epoch 26, batch 1700, train_loss[loss=3.046, NarTop10Accuracy=0.7041, over 6783.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7133, over 5920.58 frames. ], batch size: 14, lr: 3.28e-03 +2024-08-06 20:16:50,427 INFO [trainer.py:765] (6/8) Epoch 26, batch 1800, train_loss[loss=2.898, NarTop10Accuracy=0.756, over 7173.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7116, over 5966.40 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,841 INFO [trainer.py:765] (6/8) Epoch 26, batch 1900, train_loss[loss=3.086, NarTop10Accuracy=0.7175, over 6243.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7092, over 6011.74 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:17:42,380 INFO [trainer.py:765] (6/8) Epoch 26, batch 2000, train_loss[loss=3.616, NarTop10Accuracy=0.6001, over 6021.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7083, over 6015.95 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,563 INFO [trainer.py:765] (6/8) Epoch 26, batch 2100, train_loss[loss=3.133, NarTop10Accuracy=0.6959, over 3909.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7078, over 5995.91 frames. ], batch size: 4, lr: 3.27e-03 +2024-08-06 20:18:32,777 INFO [trainer.py:765] (6/8) Epoch 26, batch 2200, train_loss[loss=2.812, NarTop10Accuracy=0.7604, over 7332.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7082, over 6030.98 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,898 INFO [trainer.py:765] (6/8) Epoch 26, batch 2300, train_loss[loss=3.19, NarTop10Accuracy=0.689, over 5658.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7067, over 6032.28 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,206 INFO [trainer.py:765] (6/8) Epoch 26, batch 2400, train_loss[loss=2.817, NarTop10Accuracy=0.7637, over 5286.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7113, over 5777.91 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,652 INFO [trainer.py:765] (6/8) Epoch 26, batch 2500, train_loss[loss=2.796, NarTop10Accuracy=0.7803, over 5115.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.716, over 5484.08 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:05,933 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 20:21:04,873 INFO [trainer.py:765] (6/8) Epoch 27, batch 100, train_loss[loss=3.22, NarTop10Accuracy=0.6802, over 7233.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7129, over 2378.52 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,783 INFO [trainer.py:765] (6/8) Epoch 27, batch 200, train_loss[loss=2.719, NarTop10Accuracy=0.7848, over 6813.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 3866.55 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,049 INFO [trainer.py:765] (6/8) Epoch 27, batch 300, train_loss[loss=2.746, NarTop10Accuracy=0.7672, over 7131.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7107, over 4659.53 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (6/8) Epoch 27, batch 400, train_loss[loss=2.875, NarTop10Accuracy=0.7551, over 5103.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.712, over 5106.96 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,083 INFO [trainer.py:765] (6/8) Epoch 27, batch 500, train_loss[loss=2.779, NarTop10Accuracy=0.7763, over 6078.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5369.74 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (6/8) Epoch 27, batch 600, train_loss[loss=3.266, NarTop10Accuracy=0.6664, over 5766.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7149, over 5642.91 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,975 INFO [trainer.py:765] (6/8) Epoch 27, batch 700, train_loss[loss=2.874, NarTop10Accuracy=0.7603, over 4242.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5724.40 frames. ], batch size: 5, lr: 3.18e-03 +2024-08-06 20:25:03,407 INFO [trainer.py:765] (6/8) Epoch 27, batch 800, train_loss[loss=3.209, NarTop10Accuracy=0.6722, over 4224.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7111, over 5785.22 frames. ], batch size: 5, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (6/8) Epoch 27, batch 900, train_loss[loss=3.334, NarTop10Accuracy=0.6517, over 6210.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7112, over 5804.61 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (6/8) Epoch 27, batch 1000, train_loss[loss=2.825, NarTop10Accuracy=0.7656, over 6327.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7109, over 5901.53 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,314 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (6/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,900 INFO [trainer.py:765] (6/8) Epoch 27, batch 1100, train_loss[loss=2.994, NarTop10Accuracy=0.7358, over 6735.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7098, over 5933.57 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (6/8) Epoch 27, batch 1200, train_loss[loss=2.765, NarTop10Accuracy=0.7743, over 7473.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7119, over 5930.91 frames. ], batch size: 32, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (6/8) Epoch 27, batch 1300, train_loss[loss=2.79, NarTop10Accuracy=0.766, over 5004.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7138, over 5994.60 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (6/8) Epoch 27, batch 1400, train_loss[loss=3.423, NarTop10Accuracy=0.6415, over 6129.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7086, over 5996.43 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (6/8) Epoch 27, batch 1500, train_loss[loss=3.047, NarTop10Accuracy=0.7164, over 5667.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7101, over 5945.94 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (6/8) Epoch 27, batch 1600, train_loss[loss=2.971, NarTop10Accuracy=0.7322, over 7017.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 5933.65 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,978 INFO [trainer.py:765] (6/8) Epoch 27, batch 1700, train_loss[loss=3.094, NarTop10Accuracy=0.6934, over 6075.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5914.62 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (6/8) Epoch 27, batch 1800, train_loss[loss=3.393, NarTop10Accuracy=0.6504, over 7206.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.709, over 5981.79 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (6/8) Epoch 27, batch 1900, train_loss[loss=3.132, NarTop10Accuracy=0.7057, over 5817.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7103, over 6025.29 frames. ], batch size: 51, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (6/8) Epoch 27, batch 2000, train_loss[loss=3.117, NarTop10Accuracy=0.7012, over 5589.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7131, over 5977.33 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:42,659 INFO [trainer.py:765] (6/8) Epoch 27, batch 2100, train_loss[loss=2.719, NarTop10Accuracy=0.7765, over 4059.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7116, over 5972.37 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (6/8) Epoch 27, batch 2200, train_loss[loss=3.508, NarTop10Accuracy=0.6236, over 6978.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7085, over 6013.50 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 20:32:32,942 INFO [trainer.py:765] (6/8) Epoch 27, batch 2300, train_loss[loss=2.951, NarTop10Accuracy=0.7349, over 5745.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7084, over 6040.84 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (6/8) Epoch 27, batch 2400, train_loss[loss=2.711, NarTop10Accuracy=0.7901, over 5274.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7072, over 5785.21 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (6/8) Epoch 27, batch 2500, train_loss[loss=3.436, NarTop10Accuracy=0.6325, over 5118.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 5478.75 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,788 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 20:34:35,827 INFO [trainer.py:765] (6/8) Epoch 28, batch 100, train_loss[loss=2.826, NarTop10Accuracy=0.7565, over 7236.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7079, over 2366.88 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,392 INFO [trainer.py:765] (6/8) Epoch 28, batch 200, train_loss[loss=2.762, NarTop10Accuracy=0.7737, over 6735.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7072, over 3861.86 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,421 INFO [trainer.py:765] (6/8) Epoch 28, batch 300, train_loss[loss=3.045, NarTop10Accuracy=0.7087, over 7062.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7102, over 4672.01 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,863 INFO [trainer.py:765] (6/8) Epoch 28, batch 400, train_loss[loss=3.171, NarTop10Accuracy=0.6817, over 5229.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7072, over 5123.82 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,405 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (6/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:36:41,102 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,663 INFO [trainer.py:765] (6/8) Epoch 28, batch 500, train_loss[loss=3.231, NarTop10Accuracy=0.6767, over 6075.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7105, over 5389.65 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,462 INFO [trainer.py:765] (6/8) Epoch 28, batch 600, train_loss[loss=2.934, NarTop10Accuracy=0.7444, over 5628.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7105, over 5649.65 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,890 INFO [trainer.py:765] (6/8) Epoch 28, batch 700, train_loss[loss=3.057, NarTop10Accuracy=0.7063, over 5085.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7087, over 5732.38 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,488 INFO [trainer.py:765] (6/8) Epoch 28, batch 800, train_loss[loss=2.925, NarTop10Accuracy=0.7554, over 4293.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7126, over 5789.33 frames. ], batch size: 5, lr: 3.06e-03 +2024-08-06 20:39:15,506 INFO [trainer.py:765] (6/8) Epoch 28, batch 900, train_loss[loss=3.29, NarTop10Accuracy=0.6706, over 6735.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7137, over 5803.37 frames. ], batch size: 14, lr: 3.06e-03 +2024-08-06 20:39:53,239 INFO [trainer.py:765] (6/8) Epoch 28, batch 1000, train_loss[loss=3.121, NarTop10Accuracy=0.6915, over 6252.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7135, over 5893.74 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 20:40:25,866 INFO [trainer.py:765] (6/8) Epoch 28, batch 1100, train_loss[loss=2.831, NarTop10Accuracy=0.7604, over 6900.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7109, over 5906.12 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,418 INFO [trainer.py:765] (6/8) Epoch 28, batch 1200, train_loss[loss=3.307, NarTop10Accuracy=0.6622, over 7053.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7102, over 5904.08 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 20:41:38,680 INFO [trainer.py:765] (6/8) Epoch 28, batch 1300, train_loss[loss=2.955, NarTop10Accuracy=0.7242, over 5034.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7102, over 5960.69 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 20:42:13,047 INFO [trainer.py:765] (6/8) Epoch 28, batch 1400, train_loss[loss=2.982, NarTop10Accuracy=0.7278, over 5958.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7087, over 6007.00 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,170 INFO [trainer.py:765] (6/8) Epoch 28, batch 1500, train_loss[loss=3.442, NarTop10Accuracy=0.6357, over 6003.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.71, over 5954.05 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,080 INFO [trainer.py:765] (6/8) Epoch 28, batch 1600, train_loss[loss=2.825, NarTop10Accuracy=0.7525, over 7284.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7095, over 5926.05 frames. ], batch size: 23, lr: 3.04e-03 +2024-08-06 20:43:37,785 INFO [trainer.py:765] (6/8) Epoch 28, batch 1700, train_loss[loss=2.878, NarTop10Accuracy=0.7424, over 6540.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7083, over 5901.23 frames. 
], batch size: 14, lr: 3.04e-03 +2024-08-06 20:44:04,325 INFO [trainer.py:765] (6/8) Epoch 28, batch 1800, train_loss[loss=2.919, NarTop10Accuracy=0.7419, over 6975.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.709, over 5958.74 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,756 INFO [trainer.py:765] (6/8) Epoch 28, batch 1900, train_loss[loss=3.201, NarTop10Accuracy=0.6889, over 6141.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7096, over 6014.14 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,328 INFO [trainer.py:765] (6/8) Epoch 28, batch 2000, train_loss[loss=2.934, NarTop10Accuracy=0.7389, over 6204.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7133, over 5997.62 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 20:45:21,650 INFO [trainer.py:765] (6/8) Epoch 28, batch 2100, train_loss[loss=2.715, NarTop10Accuracy=0.7917, over 4869.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5968.68 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 20:45:47,077 INFO [trainer.py:765] (6/8) Epoch 28, batch 2200, train_loss[loss=2.901, NarTop10Accuracy=0.7515, over 7218.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7126, over 6015.17 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,308 INFO [trainer.py:765] (6/8) Epoch 28, batch 2300, train_loss[loss=3.351, NarTop10Accuracy=0.6619, over 5703.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 6025.59 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,807 INFO [trainer.py:765] (6/8) Epoch 28, batch 2400, train_loss[loss=3.016, NarTop10Accuracy=0.7217, over 5085.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7085, over 5779.15 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (6/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (6/8) Epoch 28, batch 2500, train_loss[loss=3.221, NarTop10Accuracy=0.683, over 5070.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7132, over 5477.56 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,322 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 20:48:21,052 INFO [trainer.py:765] (6/8) Epoch 29, batch 100, train_loss[loss=2.964, NarTop10Accuracy=0.7307, over 7269.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.71, over 2364.45 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 20:48:53,405 INFO [trainer.py:765] (6/8) Epoch 29, batch 200, train_loss[loss=3.292, NarTop10Accuracy=0.6727, over 6897.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 3853.10 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,476 INFO [trainer.py:765] (6/8) Epoch 29, batch 300, train_loss[loss=3.114, NarTop10Accuracy=0.6976, over 6972.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 4642.08 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,052 INFO [trainer.py:765] (6/8) Epoch 29, batch 400, train_loss[loss=3.206, NarTop10Accuracy=0.6811, over 5181.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.713, over 5105.77 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,435 INFO [trainer.py:765] (6/8) Epoch 29, batch 500, train_loss[loss=3.073, NarTop10Accuracy=0.712, over 6108.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5380.54 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,024 INFO [trainer.py:765] (6/8) Epoch 29, batch 600, train_loss[loss=2.743, NarTop10Accuracy=0.7794, over 5751.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 5661.98 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,677 INFO [trainer.py:765] (6/8) Epoch 29, batch 700, train_loss[loss=2.877, NarTop10Accuracy=0.7589, over 5079.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.711, over 5718.51 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:10,725 INFO [trainer.py:765] (6/8) Epoch 29, batch 800, train_loss[loss=2.78, NarTop10Accuracy=0.7666, over 5070.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7116, over 5777.79 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,742 INFO [trainer.py:765] (6/8) Epoch 29, batch 900, train_loss[loss=2.758, NarTop10Accuracy=0.7803, over 6621.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7114, over 5803.40 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 20:53:16,861 INFO [trainer.py:765] (6/8) Epoch 29, batch 1000, train_loss[loss=3.389, NarTop10Accuracy=0.6534, over 6192.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7103, over 5895.17 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:52,902 INFO [trainer.py:765] (6/8) Epoch 29, batch 1100, train_loss[loss=3.188, NarTop10Accuracy=0.6812, over 6843.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7095, over 5928.43 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,690 INFO [trainer.py:765] (6/8) Epoch 29, batch 1200, train_loss[loss=3.064, NarTop10Accuracy=0.7089, over 7287.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7106, over 5922.91 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,428 INFO [trainer.py:765] (6/8) Epoch 29, batch 1300, train_loss[loss=3.004, NarTop10Accuracy=0.7184, over 4371.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7106, over 5998.58 frames. ], batch size: 5, lr: 2.94e-03 +2024-08-06 20:55:32,557 INFO [trainer.py:765] (6/8) Epoch 29, batch 1400, train_loss[loss=3.262, NarTop10Accuracy=0.6666, over 6144.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7105, over 6021.86 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,359 INFO [trainer.py:765] (6/8) Epoch 29, batch 1500, train_loss[loss=3.339, NarTop10Accuracy=0.6546, over 6285.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7106, over 5946.00 frames. ], batch size: 50, lr: 2.94e-03 +2024-08-06 20:56:32,041 INFO [trainer.py:765] (6/8) Epoch 29, batch 1600, train_loss[loss=3.367, NarTop10Accuracy=0.6421, over 6942.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.709, over 5925.47 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,640 INFO [trainer.py:765] (6/8) Epoch 29, batch 1700, train_loss[loss=2.727, NarTop10Accuracy=0.786, over 6312.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7114, over 5908.11 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 20:57:25,000 INFO [trainer.py:765] (6/8) Epoch 29, batch 1800, train_loss[loss=3.053, NarTop10Accuracy=0.7131, over 6957.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7124, over 5979.61 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,622 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (6/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (6/8) Epoch 29, batch 1900, train_loss[loss=3.027, NarTop10Accuracy=0.7209, over 6513.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 6033.43 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 20:58:25,308 INFO [trainer.py:765] (6/8) Epoch 29, batch 2000, train_loss[loss=3.487, NarTop10Accuracy=0.6188, over 6336.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 6022.33 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:50,629 INFO [trainer.py:765] (6/8) Epoch 29, batch 2100, train_loss[loss=2.782, NarTop10Accuracy=0.7636, over 3840.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7075, over 5995.05 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 20:59:15,805 INFO [trainer.py:765] (6/8) Epoch 29, batch 2200, train_loss[loss=2.85, NarTop10Accuracy=0.7564, over 7263.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7095, over 6026.28 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,910 INFO [trainer.py:765] (6/8) Epoch 29, batch 2300, train_loss[loss=2.813, NarTop10Accuracy=0.7664, over 5784.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7057, over 6040.58 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,155 INFO [trainer.py:765] (6/8) Epoch 29, batch 2400, train_loss[loss=2.996, NarTop10Accuracy=0.7351, over 5238.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7085, over 5776.15 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (6/8) Epoch 29, batch 2500, train_loss[loss=3.281, NarTop10Accuracy=0.6763, over 5133.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7145, over 5491.30 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,687 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 21:01:41,717 INFO [trainer.py:765] (6/8) Epoch 30, batch 100, train_loss[loss=2.917, NarTop10Accuracy=0.7502, over 7374.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7202, over 2361.22 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,013 INFO [trainer.py:765] (6/8) Epoch 30, batch 200, train_loss[loss=2.991, NarTop10Accuracy=0.7354, over 7149.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7231, over 3857.63 frames. ], batch size: 18, lr: 2.86e-03 +2024-08-06 21:02:51,343 INFO [trainer.py:765] (6/8) Epoch 30, batch 300, train_loss[loss=2.955, NarTop10Accuracy=0.7307, over 7281.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7229, over 4651.28 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,643 INFO [trainer.py:765] (6/8) Epoch 30, batch 400, train_loss[loss=2.804, NarTop10Accuracy=0.7651, over 5091.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7197, over 5115.71 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,546 INFO [trainer.py:765] (6/8) Epoch 30, batch 500, train_loss[loss=3.532, NarTop10Accuracy=0.6108, over 6135.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7172, over 5401.59 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,656 INFO [trainer.py:765] (6/8) Epoch 30, batch 600, train_loss[loss=3.058, NarTop10Accuracy=0.7189, over 5832.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7163, over 5668.73 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,526 INFO [trainer.py:765] (6/8) Epoch 30, batch 700, train_loss[loss=3.017, NarTop10Accuracy=0.7243, over 4977.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7177, over 5737.76 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:05:44,132 INFO [trainer.py:765] (6/8) Epoch 30, batch 800, train_loss[loss=2.894, NarTop10Accuracy=0.7416, over 4233.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7188, over 5778.80 frames. ], batch size: 5, lr: 2.85e-03 +2024-08-06 21:06:14,843 INFO [trainer.py:765] (6/8) Epoch 30, batch 900, train_loss[loss=2.932, NarTop10Accuracy=0.7406, over 6324.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5795.36 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (6/8) Epoch 30, batch 1000, train_loss[loss=2.945, NarTop10Accuracy=0.7333, over 6597.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7105, over 5899.25 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:07:25,936 INFO [trainer.py:765] (6/8) Epoch 30, batch 1100, train_loss[loss=3.306, NarTop10Accuracy=0.6643, over 6747.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7086, over 5937.48 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 21:08:02,381 INFO [trainer.py:765] (6/8) Epoch 30, batch 1200, train_loss[loss=2.987, NarTop10Accuracy=0.7362, over 7323.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7109, over 5930.36 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,370 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (6/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,458 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (6/8) Epoch 30, batch 1300, train_loss[loss=2.977, NarTop10Accuracy=0.7268, over 5079.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7119, over 6002.96 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 21:09:22,397 INFO [trainer.py:765] (6/8) Epoch 30, batch 1400, train_loss[loss=2.806, NarTop10Accuracy=0.7614, over 6030.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7113, over 6015.82 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,373 INFO [trainer.py:765] (6/8) Epoch 30, batch 1500, train_loss[loss=3.075, NarTop10Accuracy=0.7197, over 6081.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7119, over 5957.95 frames. ], batch size: 51, lr: 2.84e-03 +2024-08-06 21:10:20,084 INFO [trainer.py:765] (6/8) Epoch 30, batch 1600, train_loss[loss=3.053, NarTop10Accuracy=0.724, over 7425.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.711, over 5937.20 frames. ], batch size: 23, lr: 2.84e-03 +2024-08-06 21:10:46,679 INFO [trainer.py:765] (6/8) Epoch 30, batch 1700, train_loss[loss=3.156, NarTop10Accuracy=0.6953, over 6600.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7088, over 5917.27 frames. 
], batch size: 14, lr: 2.83e-03 +2024-08-06 21:11:13,059 INFO [trainer.py:765] (6/8) Epoch 30, batch 1800, train_loss[loss=3.403, NarTop10Accuracy=0.6543, over 6822.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7095, over 5962.56 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,419 INFO [trainer.py:765] (6/8) Epoch 30, batch 1900, train_loss[loss=3.023, NarTop10Accuracy=0.7277, over 6132.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7099, over 6022.42 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,826 INFO [trainer.py:765] (6/8) Epoch 30, batch 2000, train_loss[loss=3.309, NarTop10Accuracy=0.6675, over 5871.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7125, over 5985.14 frames. ], batch size: 52, lr: 2.83e-03 +2024-08-06 21:12:30,089 INFO [trainer.py:765] (6/8) Epoch 30, batch 2100, train_loss[loss=2.641, NarTop10Accuracy=0.7888, over 4776.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.712, over 5970.47 frames. ], batch size: 5, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (6/8) Epoch 30, batch 2200, train_loss[loss=2.94, NarTop10Accuracy=0.7376, over 7455.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7113, over 6009.45 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,297 INFO [trainer.py:765] (6/8) Epoch 30, batch 2300, train_loss[loss=2.71, NarTop10Accuracy=0.7904, over 5655.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7084, over 6024.16 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,491 INFO [trainer.py:765] (6/8) Epoch 30, batch 2400, train_loss[loss=2.686, NarTop10Accuracy=0.7896, over 5097.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7157, over 5782.44 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,988 INFO [trainer.py:765] (6/8) Epoch 30, batch 2500, train_loss[loss=2.883, NarTop10Accuracy=0.7397, over 5355.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.716, over 5485.52 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,968 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (6/8) Epoch 31, batch 100, train_loss[loss=3.424, NarTop10Accuracy=0.6417, over 7080.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7134, over 2379.35 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,128 INFO [trainer.py:765] (6/8) Epoch 31, batch 200, train_loss[loss=2.804, NarTop10Accuracy=0.7568, over 6741.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7191, over 3865.26 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,216 INFO [trainer.py:765] (6/8) Epoch 31, batch 300, train_loss[loss=3.017, NarTop10Accuracy=0.7231, over 7119.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7195, over 4662.09 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (6/8) Epoch 31, batch 400, train_loss[loss=3.156, NarTop10Accuracy=0.6863, over 5139.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7168, over 5101.54 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,725 INFO [trainer.py:765] (6/8) Epoch 31, batch 500, train_loss[loss=2.813, NarTop10Accuracy=0.7684, over 6129.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5363.84 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (6/8) Epoch 31, batch 600, train_loss[loss=2.671, NarTop10Accuracy=0.7968, over 5760.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5625.61 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,611 INFO [trainer.py:765] (6/8) Epoch 31, batch 700, train_loss[loss=3.229, NarTop10Accuracy=0.6757, over 5016.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7136, over 5705.04 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,096 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (6/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,277 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 21:18:59,985 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,245 INFO [trainer.py:765] (6/8) Epoch 31, batch 800, train_loss[loss=2.776, NarTop10Accuracy=0.7712, over 5004.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7151, over 5772.30 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:19:56,950 INFO [trainer.py:765] (6/8) Epoch 31, batch 900, train_loss[loss=3.145, NarTop10Accuracy=0.6816, over 6246.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5785.27 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,310 INFO [trainer.py:765] (6/8) Epoch 31, batch 1000, train_loss[loss=3.414, NarTop10Accuracy=0.639, over 6216.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 5897.50 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 21:21:10,215 INFO [trainer.py:765] (6/8) Epoch 31, batch 1100, train_loss[loss=3.32, NarTop10Accuracy=0.6594, over 6903.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7155, over 5946.62 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,119 INFO [trainer.py:765] (6/8) Epoch 31, batch 1200, train_loss[loss=2.906, NarTop10Accuracy=0.7412, over 7353.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7186, over 5932.06 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,741 INFO [trainer.py:765] (6/8) Epoch 31, batch 1300, train_loss[loss=2.662, NarTop10Accuracy=0.7835, over 4386.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7128, over 5985.87 frames. ], batch size: 5, lr: 2.75e-03 +2024-08-06 21:22:53,533 INFO [trainer.py:765] (6/8) Epoch 31, batch 1400, train_loss[loss=2.865, NarTop10Accuracy=0.7463, over 6129.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 6005.84 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (6/8) Epoch 31, batch 1500, train_loss[loss=3.315, NarTop10Accuracy=0.6609, over 6054.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7136, over 5948.06 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:23:49,004 INFO [trainer.py:765] (6/8) Epoch 31, batch 1600, train_loss[loss=3.221, NarTop10Accuracy=0.6764, over 7080.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7151, over 5923.04 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,511 INFO [trainer.py:765] (6/8) Epoch 31, batch 1700, train_loss[loss=3.281, NarTop10Accuracy=0.6731, over 6237.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7154, over 5915.46 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,995 INFO [trainer.py:765] (6/8) Epoch 31, batch 1800, train_loss[loss=2.804, NarTop10Accuracy=0.7674, over 6951.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7177, over 5984.95 frames. 
], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,357 INFO [trainer.py:765] (6/8) Epoch 31, batch 1900, train_loss[loss=3.099, NarTop10Accuracy=0.7045, over 6489.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7146, over 6032.14 frames. ], batch size: 52, lr: 2.74e-03 +2024-08-06 21:25:33,773 INFO [trainer.py:765] (6/8) Epoch 31, batch 2000, train_loss[loss=3.02, NarTop10Accuracy=0.7218, over 6210.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 5998.65 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:59,106 INFO [trainer.py:765] (6/8) Epoch 31, batch 2100, train_loss[loss=2.644, NarTop10Accuracy=0.7915, over 3912.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7152, over 5973.89 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (6/8) Epoch 31, batch 2200, train_loss[loss=2.982, NarTop10Accuracy=0.7321, over 7116.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 6005.64 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,321 INFO [trainer.py:765] (6/8) Epoch 31, batch 2300, train_loss[loss=2.751, NarTop10Accuracy=0.7704, over 5667.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7146, over 6009.88 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,607 INFO [trainer.py:765] (6/8) Epoch 31, batch 2400, train_loss[loss=2.8, NarTop10Accuracy=0.7589, over 5109.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5763.81 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (6/8) Epoch 31, batch 2500, train_loss[loss=2.99, NarTop10Accuracy=0.7293, over 5022.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7185, over 5456.04 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:57,405 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 21:28:49,392 INFO [trainer.py:765] (6/8) Epoch 32, batch 100, train_loss[loss=2.874, NarTop10Accuracy=0.756, over 7374.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7144, over 2372.63 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,160 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (6/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,272 INFO [trainer.py:765] (6/8) Epoch 32, batch 200, train_loss[loss=3.171, NarTop10Accuracy=0.6944, over 6912.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7124, over 3861.03 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,279 INFO [trainer.py:765] (6/8) Epoch 32, batch 300, train_loss[loss=3.103, NarTop10Accuracy=0.7019, over 6981.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 4673.28 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (6/8) Epoch 32, batch 400, train_loss[loss=2.784, NarTop10Accuracy=0.7723, over 5229.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7131, over 5101.30 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,530 INFO [trainer.py:765] (6/8) Epoch 32, batch 500, train_loss[loss=3.016, NarTop10Accuracy=0.7277, over 6108.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7142, over 5392.92 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,486 INFO [trainer.py:765] (6/8) Epoch 32, batch 600, train_loss[loss=3.084, NarTop10Accuracy=0.7173, over 5577.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5638.86 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (6/8) Epoch 32, batch 700, train_loss[loss=2.829, NarTop10Accuracy=0.7593, over 5136.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5736.13 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,646 INFO [trainer.py:765] (6/8) Epoch 32, batch 800, train_loss[loss=3.324, NarTop10Accuracy=0.6554, over 5070.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 5787.63 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:28,991 INFO [trainer.py:765] (6/8) Epoch 32, batch 900, train_loss[loss=2.765, NarTop10Accuracy=0.7768, over 6180.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7193, over 5804.08 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,049 INFO [trainer.py:765] (6/8) Epoch 32, batch 1000, train_loss[loss=3.279, NarTop10Accuracy=0.6702, over 6591.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7167, over 5904.03 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (6/8) Epoch 32, batch 1100, train_loss[loss=3.125, NarTop10Accuracy=0.6955, over 6795.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7158, over 5931.52 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,171 INFO [trainer.py:765] (6/8) Epoch 32, batch 1200, train_loss[loss=3.247, NarTop10Accuracy=0.671, over 7371.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7142, over 5912.97 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 21:35:52,801 INFO [trainer.py:765] (6/8) Epoch 32, batch 1300, train_loss[loss=3.259, NarTop10Accuracy=0.6749, over 4278.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5989.30 frames. ], batch size: 5, lr: 2.66e-03 +2024-08-06 21:36:29,479 INFO [trainer.py:765] (6/8) Epoch 32, batch 1400, train_loss[loss=3.452, NarTop10Accuracy=0.6366, over 6141.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7134, over 6036.88 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,733 INFO [trainer.py:765] (6/8) Epoch 32, batch 1500, train_loss[loss=3.493, NarTop10Accuracy=0.6277, over 6057.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5956.80 frames. ], batch size: 50, lr: 2.66e-03 +2024-08-06 21:37:32,522 INFO [trainer.py:765] (6/8) Epoch 32, batch 1600, train_loss[loss=3.055, NarTop10Accuracy=0.722, over 7473.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.714, over 5933.51 frames. ], batch size: 23, lr: 2.66e-03 +2024-08-06 21:37:59,160 INFO [trainer.py:765] (6/8) Epoch 32, batch 1700, train_loss[loss=3.153, NarTop10Accuracy=0.7085, over 6216.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7135, over 5912.68 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 21:38:25,702 INFO [trainer.py:765] (6/8) Epoch 32, batch 1800, train_loss[loss=3.1, NarTop10Accuracy=0.7023, over 7209.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7132, over 5978.03 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,169 INFO [trainer.py:765] (6/8) Epoch 32, batch 1900, train_loss[loss=3.024, NarTop10Accuracy=0.7233, over 6420.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7093, over 6011.29 frames. 
], batch size: 51, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (6/8) Epoch 32, batch 2000, train_loss[loss=3.448, NarTop10Accuracy=0.6353, over 5946.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7132, over 5997.90 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,178 INFO [trainer.py:765] (6/8) Epoch 32, batch 2100, train_loss[loss=2.443, NarTop10Accuracy=0.835, over 3912.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7143, over 5987.25 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 21:39:54,782 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (6/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,629 INFO [trainer.py:765] (6/8) Epoch 32, batch 2200, train_loss[loss=3.115, NarTop10Accuracy=0.7035, over 7053.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7148, over 6036.09 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,718 INFO [trainer.py:765] (6/8) Epoch 32, batch 2300, train_loss[loss=3.374, NarTop10Accuracy=0.634, over 5727.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7101, over 6043.57 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (6/8) Epoch 32, batch 2400, train_loss[loss=3.258, NarTop10Accuracy=0.6767, over 5157.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7123, over 5779.57 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,538 INFO [trainer.py:765] (6/8) Epoch 32, batch 2500, train_loss[loss=2.911, NarTop10Accuracy=0.7553, over 5127.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.721, over 5478.01 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,419 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 21:42:47,615 INFO [trainer.py:765] (6/8) Epoch 33, batch 100, train_loss[loss=3.109, NarTop10Accuracy=0.7033, over 7668.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7233, over 2368.40 frames. ], batch size: 33, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (6/8) Epoch 33, batch 200, train_loss[loss=2.801, NarTop10Accuracy=0.7671, over 6873.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 3857.98 frames. ], batch size: 17, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (6/8) Epoch 33, batch 300, train_loss[loss=3.455, NarTop10Accuracy=0.6272, over 7014.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7178, over 4662.42 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (6/8) Epoch 33, batch 400, train_loss[loss=2.707, NarTop10Accuracy=0.772, over 5106.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7178, over 5118.33 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (6/8) Epoch 33, batch 500, train_loss[loss=2.892, NarTop10Accuracy=0.7467, over 6090.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7214, over 5389.03 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (6/8) Epoch 33, batch 600, train_loss[loss=3.296, NarTop10Accuracy=0.6551, over 5757.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.717, over 5656.78 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,316 INFO [trainer.py:765] (6/8) Epoch 33, batch 700, train_loss[loss=2.848, NarTop10Accuracy=0.7528, over 5112.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7155, over 5719.11 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (6/8) Epoch 33, batch 800, train_loss[loss=2.649, NarTop10Accuracy=0.8002, over 5034.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7156, over 5780.96 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (6/8) Epoch 33, batch 900, train_loss[loss=3.312, NarTop10Accuracy=0.6678, over 6576.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5787.91 frames. ], batch size: 14, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (6/8) Epoch 33, batch 1000, train_loss[loss=3.003, NarTop10Accuracy=0.7277, over 6237.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7154, over 5879.16 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (6/8) Epoch 33, batch 1100, train_loss[loss=2.893, NarTop10Accuracy=0.7457, over 6783.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.712, over 5916.13 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,659 INFO [trainer.py:765] (6/8) Epoch 33, batch 1200, train_loss[loss=2.858, NarTop10Accuracy=0.749, over 7236.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7139, over 5913.02 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,815 INFO [trainer.py:765] (6/8) Epoch 33, batch 1300, train_loss[loss=2.886, NarTop10Accuracy=0.7373, over 5046.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7135, over 5996.84 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (6/8) Epoch 33, batch 1400, train_loss[loss=3.329, NarTop10Accuracy=0.6521, over 6048.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7123, over 6012.78 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (6/8) Epoch 33, batch 1500, train_loss[loss=3.053, NarTop10Accuracy=0.7223, over 5634.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7139, over 5944.21 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,606 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 21:51:12,661 INFO [trainer.py:811] (6/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,262 INFO [trainer.py:765] (6/8) Epoch 33, batch 1600, train_loss[loss=3.135, NarTop10Accuracy=0.6892, over 6987.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 5951.61 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,923 INFO [trainer.py:765] (6/8) Epoch 33, batch 1700, train_loss[loss=2.751, NarTop10Accuracy=0.7751, over 6255.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7143, over 5932.55 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (6/8) Epoch 33, batch 1800, train_loss[loss=2.748, NarTop10Accuracy=0.7718, over 7065.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7163, over 5983.27 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (6/8) Epoch 33, batch 1900, train_loss[loss=3.544, NarTop10Accuracy=0.6198, over 5892.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.712, over 6014.41 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (6/8) Epoch 33, batch 2000, train_loss[loss=3.399, NarTop10Accuracy=0.64, over 5889.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 5995.41 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 21:53:31,659 INFO [trainer.py:765] (6/8) Epoch 33, batch 2100, train_loss[loss=2.995, NarTop10Accuracy=0.7101, over 4833.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7173, over 5956.65 frames. ], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,890 INFO [trainer.py:765] (6/8) Epoch 33, batch 2200, train_loss[loss=3.399, NarTop10Accuracy=0.6387, over 7344.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.715, over 5996.96 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (6/8) Epoch 33, batch 2300, train_loss[loss=2.738, NarTop10Accuracy=0.7713, over 5742.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 6024.86 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,430 INFO [trainer.py:765] (6/8) Epoch 33, batch 2400, train_loss[loss=2.665, NarTop10Accuracy=0.7927, over 5265.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.718, over 5778.76 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (6/8) Epoch 33, batch 2500, train_loss[loss=2.872, NarTop10Accuracy=0.7608, over 5220.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7218, over 5493.36 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,808 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (6/8) Epoch 34, batch 100, train_loss[loss=3.474, NarTop10Accuracy=0.626, over 7437.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7203, over 2367.41 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (6/8) Epoch 34, batch 200, train_loss[loss=3.168, NarTop10Accuracy=0.6873, over 6726.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7225, over 3861.83 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,776 INFO [trainer.py:765] (6/8) Epoch 34, batch 300, train_loss[loss=2.803, NarTop10Accuracy=0.7671, over 7116.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7188, over 4662.80 frames. ], batch size: 22, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (6/8) Epoch 34, batch 400, train_loss[loss=3.233, NarTop10Accuracy=0.6815, over 5292.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7209, over 5106.14 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,689 INFO [trainer.py:765] (6/8) Epoch 34, batch 500, train_loss[loss=3.371, NarTop10Accuracy=0.6481, over 6024.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5387.77 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (6/8) Epoch 34, batch 600, train_loss[loss=2.833, NarTop10Accuracy=0.7552, over 5832.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7191, over 5659.55 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (6/8) Epoch 34, batch 700, train_loss[loss=3.073, NarTop10Accuracy=0.7091, over 5004.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5716.86 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (6/8) Epoch 34, batch 800, train_loss[loss=2.946, NarTop10Accuracy=0.7324, over 4455.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 5759.30 frames. ], batch size: 5, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (6/8) Epoch 34, batch 900, train_loss[loss=2.897, NarTop10Accuracy=0.7461, over 6597.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 5773.28 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (6/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:01:34,092 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (6/8) Epoch 34, batch 1000, train_loss[loss=3.36, NarTop10Accuracy=0.657, over 6210.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7186, over 5880.09 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,829 INFO [trainer.py:765] (6/8) Epoch 34, batch 1100, train_loss[loss=3.264, NarTop10Accuracy=0.6752, over 6912.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5921.65 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (6/8) Epoch 34, batch 1200, train_loss[loss=2.787, NarTop10Accuracy=0.764, over 7533.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5919.88 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,814 INFO [trainer.py:765] (6/8) Epoch 34, batch 1300, train_loss[loss=2.767, NarTop10Accuracy=0.7731, over 5079.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5992.11 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,949 INFO [trainer.py:765] (6/8) Epoch 34, batch 1400, train_loss[loss=3.115, NarTop10Accuracy=0.7009, over 6036.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7161, over 6024.18 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (6/8) Epoch 34, batch 1500, train_loss[loss=3.118, NarTop10Accuracy=0.7014, over 6399.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5956.72 frames. ], batch size: 52, lr: 2.50e-03 +2024-08-06 22:04:48,600 INFO [trainer.py:765] (6/8) Epoch 34, batch 1600, train_loss[loss=2.837, NarTop10Accuracy=0.7611, over 7491.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 5939.07 frames. ], batch size: 23, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (6/8) Epoch 34, batch 1700, train_loss[loss=3.038, NarTop10Accuracy=0.7113, over 6666.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7181, over 5929.60 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 22:05:41,720 INFO [trainer.py:765] (6/8) Epoch 34, batch 1800, train_loss[loss=3.206, NarTop10Accuracy=0.6817, over 6963.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7154, over 5986.96 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,206 INFO [trainer.py:765] (6/8) Epoch 34, batch 1900, train_loss[loss=3.006, NarTop10Accuracy=0.7249, over 6108.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7119, over 6019.96 frames. 
], batch size: 51, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (6/8) Epoch 34, batch 2000, train_loss[loss=3.051, NarTop10Accuracy=0.7145, over 5598.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7153, over 6002.01 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (6/8) Epoch 34, batch 2100, train_loss[loss=3.233, NarTop10Accuracy=0.6739, over 4986.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 5997.21 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (6/8) Epoch 34, batch 2200, train_loss[loss=2.867, NarTop10Accuracy=0.76, over 7344.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7123, over 6015.75 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,536 INFO [trainer.py:765] (6/8) Epoch 34, batch 2300, train_loss[loss=2.752, NarTop10Accuracy=0.7717, over 5682.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7121, over 6015.98 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (6/8) Epoch 34, batch 2400, train_loss[loss=3.29, NarTop10Accuracy=0.6612, over 5076.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7118, over 5774.87 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (6/8) Epoch 34, batch 2500, train_loss[loss=2.853, NarTop10Accuracy=0.7673, over 5103.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.72, over 5481.65 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,519 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (6/8) Epoch 35, batch 100, train_loss[loss=2.891, NarTop10Accuracy=0.7508, over 7335.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7153, over 2366.62 frames. ], batch size: 32, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (6/8) Epoch 35, batch 200, train_loss[loss=3.153, NarTop10Accuracy=0.6923, over 6711.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.714, over 3849.04 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,943 INFO [trainer.py:765] (6/8) Epoch 35, batch 300, train_loss[loss=2.898, NarTop10Accuracy=0.7489, over 6873.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7183, over 4649.65 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (6/8) Epoch 35, batch 400, train_loss[loss=2.932, NarTop10Accuracy=0.7399, over 5100.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 5110.40 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,047 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (6/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,129 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (6/8) Epoch 35, batch 500, train_loss[loss=2.564, NarTop10Accuracy=0.8146, over 6060.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7216, over 5388.69 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,424 INFO [trainer.py:765] (6/8) Epoch 35, batch 600, train_loss[loss=3.3, NarTop10Accuracy=0.6648, over 5757.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7189, over 5643.86 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,940 INFO [trainer.py:765] (6/8) Epoch 35, batch 700, train_loss[loss=2.698, NarTop10Accuracy=0.7852, over 4995.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7182, over 5728.18 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:01,384 INFO [trainer.py:765] (6/8) Epoch 35, batch 800, train_loss[loss=2.632, NarTop10Accuracy=0.8089, over 5181.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7171, over 5773.57 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (6/8) Epoch 35, batch 900, train_loss[loss=3.164, NarTop10Accuracy=0.6931, over 6675.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7199, over 5786.81 frames. ], batch size: 14, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (6/8) Epoch 35, batch 1000, train_loss[loss=2.896, NarTop10Accuracy=0.7452, over 6108.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.718, over 5892.84 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (6/8) Epoch 35, batch 1100, train_loss[loss=2.975, NarTop10Accuracy=0.7344, over 6735.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7169, over 5914.38 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,483 INFO [trainer.py:765] (6/8) Epoch 35, batch 1200, train_loss[loss=2.958, NarTop10Accuracy=0.7389, over 7503.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5926.77 frames. ], batch size: 32, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (6/8) Epoch 35, batch 1300, train_loss[loss=2.988, NarTop10Accuracy=0.7289, over 5079.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7204, over 5991.18 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,060 INFO [trainer.py:765] (6/8) Epoch 35, batch 1400, train_loss[loss=3.05, NarTop10Accuracy=0.7152, over 6057.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7184, over 6000.84 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (6/8) Epoch 35, batch 1500, train_loss[loss=3.078, NarTop10Accuracy=0.7053, over 6201.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.717, over 5961.36 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (6/8) Epoch 35, batch 1600, train_loss[loss=2.934, NarTop10Accuracy=0.7408, over 7425.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7143, over 5939.21 frames. ], batch size: 22, lr: 2.43e-03 +2024-08-06 22:18:57,320 INFO [trainer.py:765] (6/8) Epoch 35, batch 1700, train_loss[loss=2.753, NarTop10Accuracy=0.78, over 6285.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.714, over 5922.13 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,702 INFO [trainer.py:765] (6/8) Epoch 35, batch 1800, train_loss[loss=3.455, NarTop10Accuracy=0.6315, over 7428.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7152, over 5989.18 frames. ], batch size: 23, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (6/8) Epoch 35, batch 1900, train_loss[loss=3.147, NarTop10Accuracy=0.6898, over 5898.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7152, over 6018.65 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (6/8) Epoch 35, batch 2000, train_loss[loss=3.088, NarTop10Accuracy=0.7092, over 6621.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7166, over 5993.93 frames. 
], batch size: 52, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (6/8) Epoch 35, batch 2100, train_loss[loss=2.701, NarTop10Accuracy=0.7995, over 4761.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7154, over 5958.96 frames. ], batch size: 5, lr: 2.42e-03 +2024-08-06 22:21:06,227 INFO [trainer.py:765] (6/8) Epoch 35, batch 2200, train_loss[loss=2.902, NarTop10Accuracy=0.7492, over 7374.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7149, over 6010.01 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (6/8) Epoch 35, batch 2300, train_loss[loss=2.894, NarTop10Accuracy=0.7404, over 5631.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7146, over 6025.82 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (6/8) Epoch 35, batch 2400, train_loss[loss=3.338, NarTop10Accuracy=0.6502, over 5331.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7149, over 5778.91 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,681 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (6/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,128 INFO [trainer.py:765] (6/8) Epoch 35, batch 2500, train_loss[loss=3.093, NarTop10Accuracy=0.7154, over 5142.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7192, over 5488.80 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:46,747 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 22:23:47,172 INFO [trainer.py:765] (6/8) Epoch 36, batch 100, train_loss[loss=3.225, NarTop10Accuracy=0.6761, over 6990.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.7262, over 2378.31 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (6/8) Epoch 36, batch 200, train_loss[loss=2.851, NarTop10Accuracy=0.7669, over 6759.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7209, over 3861.54 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (6/8) Epoch 36, batch 300, train_loss[loss=3.311, NarTop10Accuracy=0.6648, over 7095.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7182, over 4667.47 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,276 INFO [trainer.py:765] (6/8) Epoch 36, batch 400, train_loss[loss=2.868, NarTop10Accuracy=0.7556, over 5241.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7224, over 5134.06 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (6/8) Epoch 36, batch 500, train_loss[loss=3.409, NarTop10Accuracy=0.646, over 6096.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7217, over 5429.81 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,025 INFO [trainer.py:765] (6/8) Epoch 36, batch 600, train_loss[loss=2.931, NarTop10Accuracy=0.7494, over 5679.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7222, over 5679.70 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,991 INFO [trainer.py:765] (6/8) Epoch 36, batch 700, train_loss[loss=3.235, NarTop10Accuracy=0.6876, over 4419.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7224, over 5732.32 frames. 
], batch size: 5, lr: 2.37e-03 +2024-08-06 22:27:44,915 INFO [trainer.py:765] (6/8) Epoch 36, batch 800, train_loss[loss=3.314, NarTop10Accuracy=0.6586, over 4965.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7197, over 5794.89 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:28:17,812 INFO [trainer.py:765] (6/8) Epoch 36, batch 900, train_loss[loss=2.767, NarTop10Accuracy=0.7696, over 6573.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7221, over 5815.37 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:28:56,984 INFO [trainer.py:765] (6/8) Epoch 36, batch 1000, train_loss[loss=3.411, NarTop10Accuracy=0.6441, over 6672.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7199, over 5918.68 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (6/8) Epoch 36, batch 1100, train_loss[loss=2.963, NarTop10Accuracy=0.7313, over 6717.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7196, over 5952.48 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,681 INFO [trainer.py:765] (6/8) Epoch 36, batch 1200, train_loss[loss=2.991, NarTop10Accuracy=0.7272, over 7395.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7212, over 5936.13 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,576 INFO [trainer.py:765] (6/8) Epoch 36, batch 1300, train_loss[loss=2.796, NarTop10Accuracy=0.7689, over 5010.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7191, over 5991.38 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,939 INFO [trainer.py:765] (6/8) Epoch 36, batch 1400, train_loss[loss=3.059, NarTop10Accuracy=0.7172, over 5976.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7213, over 6023.50 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,749 INFO [trainer.py:765] (6/8) Epoch 36, batch 1500, train_loss[loss=3.529, NarTop10Accuracy=0.6193, over 6264.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7202, over 5944.27 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 22:32:11,460 INFO [trainer.py:765] (6/8) Epoch 36, batch 1600, train_loss[loss=3.341, NarTop10Accuracy=0.6455, over 7056.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.72, over 5931.04 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,109 INFO [trainer.py:765] (6/8) Epoch 36, batch 1700, train_loss[loss=3.324, NarTop10Accuracy=0.6505, over 6105.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7176, over 5908.60 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (6/8) Epoch 36, batch 1800, train_loss[loss=3.079, NarTop10Accuracy=0.705, over 7089.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7188, over 5986.33 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:33:15,171 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (6/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (6/8) Epoch 36, batch 1900, train_loss[loss=3.019, NarTop10Accuracy=0.7281, over 6189.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7181, over 6028.92 frames. 
], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (6/8) Epoch 36, batch 2000, train_loss[loss=3.182, NarTop10Accuracy=0.6954, over 6069.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.719, over 6009.28 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,514 INFO [trainer.py:765] (6/8) Epoch 36, batch 2100, train_loss[loss=2.84, NarTop10Accuracy=0.7571, over 4896.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7198, over 5971.47 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (6/8) Epoch 36, batch 2200, train_loss[loss=3.369, NarTop10Accuracy=0.651, over 7149.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7163, over 6005.18 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,145 INFO [trainer.py:765] (6/8) Epoch 36, batch 2300, train_loss[loss=3.365, NarTop10Accuracy=0.6495, over 5631.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7139, over 6009.96 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,600 INFO [trainer.py:765] (6/8) Epoch 36, batch 2400, train_loss[loss=3.136, NarTop10Accuracy=0.6861, over 5232.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7168, over 5765.29 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (6/8) Epoch 36, batch 2500, train_loss[loss=2.872, NarTop10Accuracy=0.7537, over 5268.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7205, over 5468.45 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:29,013 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 22:37:29,727 INFO [trainer.py:765] (6/8) Epoch 37, batch 100, train_loss[loss=2.873, NarTop10Accuracy=0.759, over 7284.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7149, over 2357.00 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,273 INFO [trainer.py:765] (6/8) Epoch 37, batch 200, train_loss[loss=2.902, NarTop10Accuracy=0.7505, over 6810.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7197, over 3859.61 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,956 INFO [trainer.py:765] (6/8) Epoch 37, batch 300, train_loss[loss=3.182, NarTop10Accuracy=0.6885, over 6888.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 4675.85 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 22:39:09,307 INFO [trainer.py:765] (6/8) Epoch 37, batch 400, train_loss[loss=2.735, NarTop10Accuracy=0.7772, over 5256.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7237, over 5121.50 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,862 INFO [trainer.py:765] (6/8) Epoch 37, batch 500, train_loss[loss=3.258, NarTop10Accuracy=0.6776, over 6093.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7241, over 5383.23 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,334 INFO [trainer.py:765] (6/8) Epoch 37, batch 600, train_loss[loss=2.626, NarTop10Accuracy=0.8029, over 5802.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7228, over 5653.19 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,616 INFO [trainer.py:765] (6/8) Epoch 37, batch 700, train_loss[loss=3.038, NarTop10Accuracy=0.7098, over 4368.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.717, over 5725.37 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:41:30,565 INFO [trainer.py:765] (6/8) Epoch 37, batch 800, train_loss[loss=2.765, NarTop10Accuracy=0.7627, over 5160.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7173, over 5770.62 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,083 INFO [trainer.py:765] (6/8) Epoch 37, batch 900, train_loss[loss=2.798, NarTop10Accuracy=0.7671, over 6186.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7203, over 5787.07 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:42:38,268 INFO [trainer.py:765] (6/8) Epoch 37, batch 1000, train_loss[loss=3.271, NarTop10Accuracy=0.6637, over 6207.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7168, over 5887.45 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (6/8) Epoch 37, batch 1100, train_loss[loss=3.142, NarTop10Accuracy=0.6991, over 6870.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7159, over 5926.83 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,740 INFO [trainer.py:765] (6/8) Epoch 37, batch 1200, train_loss[loss=2.853, NarTop10Accuracy=0.7556, over 7122.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7154, over 5923.16 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,755 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (6/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,076 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:44:20,605 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,782 INFO [trainer.py:765] (6/8) Epoch 37, batch 1300, train_loss[loss=2.669, NarTop10Accuracy=0.7893, over 4296.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7193, over 5993.98 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:45:10,386 INFO [trainer.py:765] (6/8) Epoch 37, batch 1400, train_loss[loss=2.72, NarTop10Accuracy=0.7837, over 6057.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7197, over 6012.99 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,511 INFO [trainer.py:765] (6/8) Epoch 37, batch 1500, train_loss[loss=2.9, NarTop10Accuracy=0.7446, over 6279.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5947.38 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,436 INFO [trainer.py:765] (6/8) Epoch 37, batch 1600, train_loss[loss=3.441, NarTop10Accuracy=0.6325, over 7272.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5938.36 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,186 INFO [trainer.py:765] (6/8) Epoch 37, batch 1700, train_loss[loss=3.309, NarTop10Accuracy=0.662, over 6714.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 5937.26 frames. ], batch size: 14, lr: 2.29e-03 +2024-08-06 22:47:01,791 INFO [trainer.py:765] (6/8) Epoch 37, batch 1800, train_loss[loss=2.836, NarTop10Accuracy=0.7613, over 7281.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7198, over 5991.81 frames. ], batch size: 23, lr: 2.29e-03 +2024-08-06 22:47:28,310 INFO [trainer.py:765] (6/8) Epoch 37, batch 1900, train_loss[loss=3.003, NarTop10Accuracy=0.7243, over 5886.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7186, over 6032.61 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:47:53,923 INFO [trainer.py:765] (6/8) Epoch 37, batch 2000, train_loss[loss=3.213, NarTop10Accuracy=0.6839, over 5658.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7199, over 6015.19 frames. 
], batch size: 50, lr: 2.29e-03 +2024-08-06 22:48:19,324 INFO [trainer.py:765] (6/8) Epoch 37, batch 2100, train_loss[loss=2.979, NarTop10Accuracy=0.7232, over 3942.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.718, over 5975.88 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 22:48:44,706 INFO [trainer.py:765] (6/8) Epoch 37, batch 2200, train_loss[loss=2.932, NarTop10Accuracy=0.744, over 7131.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7156, over 6011.73 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,911 INFO [trainer.py:765] (6/8) Epoch 37, batch 2300, train_loss[loss=2.732, NarTop10Accuracy=0.7856, over 5760.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7167, over 6043.72 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,317 INFO [trainer.py:765] (6/8) Epoch 37, batch 2400, train_loss[loss=3.195, NarTop10Accuracy=0.6837, over 5157.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7197, over 5762.07 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,859 INFO [trainer.py:765] (6/8) Epoch 37, batch 2500, train_loss[loss=3.243, NarTop10Accuracy=0.6834, over 4995.00 frames. ], tot_loss[loss=2.992, NarTop10Accuracy=0.7267, over 5471.08 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:17,882 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 22:51:16,151 INFO [trainer.py:765] (6/8) Epoch 38, batch 100, train_loss[loss=3.006, NarTop10Accuracy=0.7211, over 7269.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7233, over 2354.38 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,014 INFO [trainer.py:765] (6/8) Epoch 38, batch 200, train_loss[loss=3.243, NarTop10Accuracy=0.6791, over 6714.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7228, over 3859.93 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,201 INFO [trainer.py:765] (6/8) Epoch 38, batch 300, train_loss[loss=2.869, NarTop10Accuracy=0.7441, over 7083.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7194, over 4667.82 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (6/8) Epoch 38, batch 400, train_loss[loss=3.275, NarTop10Accuracy=0.6714, over 5007.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7219, over 5115.46 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,227 INFO [trainer.py:765] (6/8) Epoch 38, batch 500, train_loss[loss=2.788, NarTop10Accuracy=0.7643, over 6069.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7272, over 5408.20 frames. ], batch size: 11, lr: 2.25e-03 +2024-08-06 22:54:05,497 INFO [trainer.py:765] (6/8) Epoch 38, batch 600, train_loss[loss=3.266, NarTop10Accuracy=0.6713, over 5676.00 frames. ], tot_loss[loss=2.999, NarTop10Accuracy=0.7256, over 5668.65 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,002 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (6/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,659 INFO [trainer.py:765] (6/8) Epoch 38, batch 700, train_loss[loss=2.764, NarTop10Accuracy=0.7678, over 4938.00 frames. ], tot_loss[loss=2.999, NarTop10Accuracy=0.7255, over 5738.21 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,938 INFO [trainer.py:765] (6/8) Epoch 38, batch 800, train_loss[loss=2.888, NarTop10Accuracy=0.7472, over 5073.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7225, over 5776.81 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,704 INFO [trainer.py:765] (6/8) Epoch 38, batch 900, train_loss[loss=2.728, NarTop10Accuracy=0.7759, over 6333.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7227, over 5800.13 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,090 INFO [trainer.py:765] (6/8) Epoch 38, batch 1000, train_loss[loss=3.406, NarTop10Accuracy=0.6439, over 6684.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7223, over 5891.02 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 22:57:08,991 INFO [trainer.py:765] (6/8) Epoch 38, batch 1100, train_loss[loss=3.12, NarTop10Accuracy=0.6909, over 6831.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 5919.81 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,663 INFO [trainer.py:765] (6/8) Epoch 38, batch 1200, train_loss[loss=2.868, NarTop10Accuracy=0.7526, over 7149.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 5925.33 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,546 INFO [trainer.py:765] (6/8) Epoch 38, batch 1300, train_loss[loss=3.289, NarTop10Accuracy=0.6712, over 5106.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7191, over 5989.40 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,810 INFO [trainer.py:765] (6/8) Epoch 38, batch 1400, train_loss[loss=2.842, NarTop10Accuracy=0.7558, over 6153.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7152, over 6001.23 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,854 INFO [trainer.py:765] (6/8) Epoch 38, batch 1500, train_loss[loss=3.472, NarTop10Accuracy=0.6353, over 6321.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.718, over 5924.26 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,644 INFO [trainer.py:765] (6/8) Epoch 38, batch 1600, train_loss[loss=3.304, NarTop10Accuracy=0.6613, over 6999.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7165, over 5917.85 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,315 INFO [trainer.py:765] (6/8) Epoch 38, batch 1700, train_loss[loss=2.873, NarTop10Accuracy=0.7458, over 6213.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.7138, over 5902.33 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 23:00:43,764 INFO [trainer.py:765] (6/8) Epoch 38, batch 1800, train_loss[loss=3.133, NarTop10Accuracy=0.6893, over 7050.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5962.08 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:01:10,192 INFO [trainer.py:765] (6/8) Epoch 38, batch 1900, train_loss[loss=3.442, NarTop10Accuracy=0.6355, over 6081.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 6022.31 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 23:01:35,683 INFO [trainer.py:765] (6/8) Epoch 38, batch 2000, train_loss[loss=3.304, NarTop10Accuracy=0.6668, over 6039.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7157, over 5987.17 frames. ], batch size: 52, lr: 2.23e-03 +2024-08-06 23:02:01,050 INFO [trainer.py:765] (6/8) Epoch 38, batch 2100, train_loss[loss=2.847, NarTop10Accuracy=0.7505, over 4905.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 5976.48 frames. 
], batch size: 5, lr: 2.23e-03 +2024-08-06 23:02:26,315 INFO [trainer.py:765] (6/8) Epoch 38, batch 2200, train_loss[loss=2.727, NarTop10Accuracy=0.7791, over 7329.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7176, over 6020.98 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,420 INFO [trainer.py:765] (6/8) Epoch 38, batch 2300, train_loss[loss=2.807, NarTop10Accuracy=0.7629, over 5781.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7165, over 6034.18 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,349 INFO [trainer.py:765] (6/8) Epoch 38, batch 2400, train_loss[loss=2.684, NarTop10Accuracy=0.7859, over 5190.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 5797.06 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,823 INFO [trainer.py:765] (6/8) Epoch 38, batch 2500, train_loss[loss=3.229, NarTop10Accuracy=0.6767, over 4986.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7231, over 5466.14 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,661 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 23:04:58,940 INFO [trainer.py:765] (6/8) Epoch 39, batch 100, train_loss[loss=3.227, NarTop10Accuracy=0.6815, over 7251.00 frames. ], tot_loss[loss=2.976, NarTop10Accuracy=0.731, over 2372.46 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,468 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (6/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (6/8) Epoch 39, batch 200, train_loss[loss=2.77, NarTop10Accuracy=0.7739, over 6819.00 frames. ], tot_loss[loss=2.995, NarTop10Accuracy=0.7271, over 3858.76 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (6/8) Epoch 39, batch 300, train_loss[loss=2.983, NarTop10Accuracy=0.7278, over 6966.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7276, over 4666.83 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,275 INFO [trainer.py:765] (6/8) Epoch 39, batch 400, train_loss[loss=2.919, NarTop10Accuracy=0.7413, over 5262.00 frames. ], tot_loss[loss=2.981, NarTop10Accuracy=0.7292, over 5098.07 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (6/8) Epoch 39, batch 500, train_loss[loss=3.402, NarTop10Accuracy=0.6376, over 6036.00 frames. ], tot_loss[loss=2.996, NarTop10Accuracy=0.726, over 5360.48 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (6/8) Epoch 39, batch 600, train_loss[loss=2.812, NarTop10Accuracy=0.7649, over 5610.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7234, over 5636.22 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,694 INFO [trainer.py:765] (6/8) Epoch 39, batch 700, train_loss[loss=3.347, NarTop10Accuracy=0.6572, over 5154.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 5706.52 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (6/8) Epoch 39, batch 800, train_loss[loss=2.814, NarTop10Accuracy=0.7623, over 4305.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5787.68 frames. 
], batch size: 5, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (6/8) Epoch 39, batch 900, train_loss[loss=3.303, NarTop10Accuracy=0.6618, over 6663.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7216, over 5818.56 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (6/8) Epoch 39, batch 1000, train_loss[loss=2.859, NarTop10Accuracy=0.7566, over 6222.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7224, over 5909.29 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (6/8) Epoch 39, batch 1100, train_loss[loss=2.854, NarTop10Accuracy=0.7524, over 6813.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.718, over 5934.94 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (6/8) Epoch 39, batch 1200, train_loss[loss=2.928, NarTop10Accuracy=0.7437, over 7161.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 5928.40 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,252 INFO [trainer.py:765] (6/8) Epoch 39, batch 1300, train_loss[loss=2.741, NarTop10Accuracy=0.7781, over 5073.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7244, over 6006.87 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,301 INFO [trainer.py:765] (6/8) Epoch 39, batch 1400, train_loss[loss=2.924, NarTop10Accuracy=0.736, over 5958.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7225, over 6021.92 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (6/8) Epoch 39, batch 1500, train_loss[loss=3.615, NarTop10Accuracy=0.6038, over 6357.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7225, over 5954.15 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (6/8) Epoch 39, batch 1600, train_loss[loss=2.864, NarTop10Accuracy=0.7472, over 6930.00 frames. ], tot_loss[loss=3.004, NarTop10Accuracy=0.7243, over 5931.97 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,219 INFO [trainer.py:765] (6/8) Epoch 39, batch 1700, train_loss[loss=3.39, NarTop10Accuracy=0.6503, over 6156.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7184, over 5900.92 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 23:14:30,767 INFO [trainer.py:765] (6/8) Epoch 39, batch 1800, train_loss[loss=2.864, NarTop10Accuracy=0.7569, over 7170.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.718, over 5981.18 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,179 INFO [trainer.py:765] (6/8) Epoch 39, batch 1900, train_loss[loss=3.002, NarTop10Accuracy=0.7307, over 6312.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7159, over 6026.16 frames. ], batch size: 51, lr: 2.17e-03 +2024-08-06 23:15:22,750 INFO [trainer.py:765] (6/8) Epoch 39, batch 2000, train_loss[loss=3.386, NarTop10Accuracy=0.6476, over 6645.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.72, over 5982.67 frames. ], batch size: 51, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (6/8) Epoch 39, batch 2100, train_loss[loss=3.239, NarTop10Accuracy=0.6744, over 3882.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7204, over 5962.40 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 23:15:51,870 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (6/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (6/8) Epoch 39, batch 2200, train_loss[loss=3.225, NarTop10Accuracy=0.6891, over 7137.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 6012.78 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (6/8) Epoch 39, batch 2300, train_loss[loss=2.54, NarTop10Accuracy=0.8173, over 5643.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7175, over 6030.73 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (6/8) Epoch 39, batch 2400, train_loss[loss=2.675, NarTop10Accuracy=0.7983, over 5238.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5776.75 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (6/8) Epoch 39, batch 2500, train_loss[loss=2.96, NarTop10Accuracy=0.7377, over 5148.00 frames. ], tot_loss[loss=2.989, NarTop10Accuracy=0.727, over 5463.48 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,481 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (6/8) Epoch 40, batch 100, train_loss[loss=2.992, NarTop10Accuracy=0.7267, over 7323.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.725, over 2369.38 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (6/8) Epoch 40, batch 200, train_loss[loss=2.826, NarTop10Accuracy=0.7586, over 6672.00 frames. ], tot_loss[loss=3.001, NarTop10Accuracy=0.726, over 3863.52 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (6/8) Epoch 40, batch 300, train_loss[loss=2.739, NarTop10Accuracy=0.7779, over 7113.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7232, over 4653.26 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (6/8) Epoch 40, batch 400, train_loss[loss=2.84, NarTop10Accuracy=0.7514, over 5445.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.723, over 5124.71 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,250 INFO [trainer.py:765] (6/8) Epoch 40, batch 500, train_loss[loss=2.633, NarTop10Accuracy=0.7876, over 6048.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.723, over 5386.32 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,881 INFO [trainer.py:765] (6/8) Epoch 40, batch 600, train_loss[loss=2.96, NarTop10Accuracy=0.7416, over 5748.00 frames. ], tot_loss[loss=3.002, NarTop10Accuracy=0.7248, over 5647.05 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (6/8) Epoch 40, batch 700, train_loss[loss=3.017, NarTop10Accuracy=0.7359, over 5190.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7238, over 5727.68 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,755 INFO [trainer.py:765] (6/8) Epoch 40, batch 800, train_loss[loss=2.646, NarTop10Accuracy=0.7968, over 5253.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7203, over 5785.13 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,636 INFO [trainer.py:765] (6/8) Epoch 40, batch 900, train_loss[loss=3.327, NarTop10Accuracy=0.6602, over 6186.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7204, over 5789.27 frames. 
], batch size: 13, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (6/8) Epoch 40, batch 1000, train_loss[loss=3.255, NarTop10Accuracy=0.6706, over 6177.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7199, over 5892.79 frames. ], batch size: 13, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (6/8) Epoch 40, batch 1100, train_loss[loss=2.729, NarTop10Accuracy=0.7792, over 6849.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7209, over 5933.04 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (6/8) Epoch 40, batch 1200, train_loss[loss=2.885, NarTop10Accuracy=0.7489, over 7473.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.722, over 5909.94 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,842 INFO [trainer.py:765] (6/8) Epoch 40, batch 1300, train_loss[loss=2.741, NarTop10Accuracy=0.7833, over 5004.00 frames. ], tot_loss[loss=3.011, NarTop10Accuracy=0.7229, over 5987.59 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,384 INFO [trainer.py:765] (6/8) Epoch 40, batch 1400, train_loss[loss=2.757, NarTop10Accuracy=0.7715, over 6159.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7207, over 6011.54 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (6/8) Epoch 40, batch 1500, train_loss[loss=3.284, NarTop10Accuracy=0.6697, over 5988.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7234, over 5948.77 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (6/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30374MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (6/8) Epoch 40, batch 1600, train_loss[loss=2.914, NarTop10Accuracy=0.7456, over 7050.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7216, over 5932.89 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,056 INFO [trainer.py:765] (6/8) Epoch 40, batch 1700, train_loss[loss=3.251, NarTop10Accuracy=0.6697, over 6609.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.721, over 5916.77 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 23:28:12,579 INFO [trainer.py:765] (6/8) Epoch 40, batch 1800, train_loss[loss=3.029, NarTop10Accuracy=0.7215, over 7074.00 frames. ], tot_loss[loss=3.003, NarTop10Accuracy=0.7244, over 5993.07 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (6/8) Epoch 40, batch 1900, train_loss[loss=3.103, NarTop10Accuracy=0.704, over 6453.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 6025.05 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,444 INFO [trainer.py:765] (6/8) Epoch 40, batch 2000, train_loss[loss=3.597, NarTop10Accuracy=0.6041, over 6372.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7234, over 6007.21 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (6/8) Epoch 40, batch 2100, train_loss[loss=2.659, NarTop10Accuracy=0.794, over 4896.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7233, over 5988.10 frames. 
], batch size: 5, lr: 2.11e-03 +2024-08-06 23:29:54,940 INFO [trainer.py:765] (6/8) Epoch 40, batch 2200, train_loss[loss=3.252, NarTop10Accuracy=0.6788, over 7203.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7202, over 6013.90 frames. ], batch size: 31, lr: 2.11e-03 +2024-08-06 23:30:20,012 INFO [trainer.py:765] (6/8) Epoch 40, batch 2300, train_loss[loss=2.954, NarTop10Accuracy=0.7343, over 5679.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 6010.38 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,296 INFO [trainer.py:765] (6/8) Epoch 40, batch 2400, train_loss[loss=2.799, NarTop10Accuracy=0.7685, over 5148.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7206, over 5761.50 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (6/8) Epoch 40, batch 2500, train_loss[loss=2.996, NarTop10Accuracy=0.7235, over 5238.00 frames. ], tot_loss[loss=2.982, NarTop10Accuracy=0.7287, over 5463.32 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,899 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 23:31:27,901 INFO [trainer.py:1069] (6/8) Done! diff --git a/libritts-r/log/log-train-2024-08-06-14-23-41-7 b/libritts-r/log/log-train-2024-08-06-14-23-41-7 new file mode 100644 index 0000000000000000000000000000000000000000..822677b4423d702eee3712f5b62d32b02d9387da --- /dev/null +++ b/libritts-r/log/log-train-2024-08-06-14-23-41-7 @@ -0,0 +1,1260 @@ +2024-08-06 14:23:41,780 INFO [trainer.py:870] (7/8) Training started +2024-08-06 14:23:41,781 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 14:23:41,781 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': None, 'icefall-git-sha1': None, 'icefall-git-date': None, 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6867463', 'IP address': '0.104.202.7'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 100000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 14:23:41,781 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 14:23:42,502 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 14:23:42,503 INFO [checkpoint.py:112] (7/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 14:23:47,405 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 14:23:49,635 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 14:23:49,638 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 14:23:49,639 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 14:23:49,639 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 14:23:49,642 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 14:23:50,260 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 14:23:50,260 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 14:23:50,593 INFO [datamodule.py:388] (7/8) About to create dev dataloader +2024-08-06 14:24:38,250 INFO [trainer.py:765] (7/8) Epoch 1, batch 100, train_loss[loss=103.5, NarTop10Accuracy=0.02183, over 7275.00 frames. ], tot_loss[loss=74.16, NarTop10Accuracy=0.04611, over 2357.02 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 14:25:07,520 INFO [trainer.py:765] (7/8) Epoch 1, batch 200, train_loss[loss=143.3, NarTop10Accuracy=0.01556, over 6918.00 frames. ], tot_loss[loss=97.4, NarTop10Accuracy=0.04159, over 3849.95 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 14:25:37,111 INFO [trainer.py:765] (7/8) Epoch 1, batch 300, train_loss[loss=104.7, NarTop10Accuracy=0.0253, over 7089.00 frames. ], tot_loss[loss=84.96, NarTop10Accuracy=0.04262, over 4637.01 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 14:26:07,483 INFO [trainer.py:765] (7/8) Epoch 1, batch 400, train_loss[loss=52.83, NarTop10Accuracy=0.02097, over 5247.00 frames. ], tot_loss[loss=67.82, NarTop10Accuracy=0.04698, over 5084.36 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 14:26:35,358 INFO [trainer.py:765] (7/8) Epoch 1, batch 500, train_loss[loss=14.57, NarTop10Accuracy=0.02305, over 5964.00 frames. ], tot_loss[loss=48.99, NarTop10Accuracy=0.05073, over 5372.44 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 14:27:04,001 INFO [trainer.py:765] (7/8) Epoch 1, batch 600, train_loss[loss=6.292, NarTop10Accuracy=0.1426, over 5664.00 frames. ], tot_loss[loss=33.4, NarTop10Accuracy=0.05573, over 5644.19 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 14:27:39,491 INFO [trainer.py:765] (7/8) Epoch 1, batch 700, train_loss[loss=6.78, NarTop10Accuracy=0.1415, over 4347.00 frames. ], tot_loss[loss=23.36, NarTop10Accuracy=0.06433, over 5726.15 frames. ], batch size: 5, lr: 2.99e-02 +2024-08-06 14:28:08,832 INFO [trainer.py:765] (7/8) Epoch 1, batch 800, train_loss[loss=6.497, NarTop10Accuracy=0.1292, over 5106.00 frames. ], tot_loss[loss=17.1, NarTop10Accuracy=0.08539, over 5802.30 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 14:28:36,758 INFO [trainer.py:765] (7/8) Epoch 1, batch 900, train_loss[loss=5.749, NarTop10Accuracy=0.1873, over 6552.00 frames. ], tot_loss[loss=12.76, NarTop10Accuracy=0.1136, over 5809.49 frames. 
], batch size: 14, lr: 2.98e-02 +2024-08-06 14:29:12,587 INFO [trainer.py:765] (7/8) Epoch 1, batch 1000, train_loss[loss=5.706, NarTop10Accuracy=0.1899, over 6186.00 frames. ], tot_loss[loss=10.09, NarTop10Accuracy=0.1334, over 5911.50 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 14:29:42,826 INFO [trainer.py:765] (7/8) Epoch 1, batch 1100, train_loss[loss=5.693, NarTop10Accuracy=0.1925, over 6918.00 frames. ], tot_loss[loss=8.407, NarTop10Accuracy=0.1515, over 5957.99 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 14:30:11,469 INFO [trainer.py:765] (7/8) Epoch 1, batch 1200, train_loss[loss=5.936, NarTop10Accuracy=0.1677, over 7143.00 frames. ], tot_loss[loss=7.343, NarTop10Accuracy=0.1717, over 5930.53 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 14:30:48,748 INFO [trainer.py:765] (7/8) Epoch 1, batch 1300, train_loss[loss=5.253, NarTop10Accuracy=0.2725, over 4968.00 frames. ], tot_loss[loss=6.673, NarTop10Accuracy=0.1876, over 5993.47 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 14:31:18,144 INFO [trainer.py:765] (7/8) Epoch 1, batch 1400, train_loss[loss=5.685, NarTop10Accuracy=0.1904, over 6060.00 frames. ], tot_loss[loss=6.247, NarTop10Accuracy=0.1979, over 6003.05 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 14:31:46,026 INFO [trainer.py:765] (7/8) Epoch 1, batch 1500, train_loss[loss=5.648, NarTop10Accuracy=0.209, over 5748.00 frames. ], tot_loss[loss=5.969, NarTop10Accuracy=0.2095, over 5936.07 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 14:32:13,692 INFO [trainer.py:765] (7/8) Epoch 1, batch 1600, train_loss[loss=5.588, NarTop10Accuracy=0.215, over 6840.00 frames. ], tot_loss[loss=5.788, NarTop10Accuracy=0.218, over 5928.93 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 14:32:40,199 INFO [trainer.py:765] (7/8) Epoch 1, batch 1700, train_loss[loss=5.307, NarTop10Accuracy=0.281, over 6201.00 frames. ], tot_loss[loss=5.669, NarTop10Accuracy=0.2244, over 5940.57 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 14:33:06,500 INFO [trainer.py:765] (7/8) Epoch 1, batch 1800, train_loss[loss=5.657, NarTop10Accuracy=0.1914, over 6969.00 frames. ], tot_loss[loss=5.581, NarTop10Accuracy=0.232, over 5985.38 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 14:33:32,625 INFO [trainer.py:765] (7/8) Epoch 1, batch 1900, train_loss[loss=5.803, NarTop10Accuracy=0.1758, over 5970.00 frames. ], tot_loss[loss=5.516, NarTop10Accuracy=0.2392, over 6028.30 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 14:33:58,015 INFO [trainer.py:765] (7/8) Epoch 1, batch 2000, train_loss[loss=5.538, NarTop10Accuracy=0.2249, over 6180.00 frames. ], tot_loss[loss=5.453, NarTop10Accuracy=0.2481, over 5974.13 frames. ], batch size: 52, lr: 2.89e-02 +2024-08-06 14:33:58,016 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:34:06,103 INFO [trainer.py:811] (7/8) Epoch 1, validation: loss=5.397, NarTop10Accuracy=0.2581, over 1905321.00 frames. +2024-08-06 14:34:06,104 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26703MB +2024-08-06 14:34:06,612 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 4.749e+01 2.278e+02 7.300e+02 1.664e+04 7.177e+05, threshold=1.460e+03, percent-clipped=0.0 +2024-08-06 14:34:32,062 INFO [trainer.py:765] (7/8) Epoch 1, batch 2100, train_loss[loss=5.309, NarTop10Accuracy=0.2659, over 4800.00 frames. ], tot_loss[loss=5.385, NarTop10Accuracy=0.2597, over 5967.37 frames. 
], batch size: 5, lr: 2.88e-02 +2024-08-06 14:34:57,304 INFO [trainer.py:765] (7/8) Epoch 1, batch 2200, train_loss[loss=5.409, NarTop10Accuracy=0.2639, over 7545.00 frames. ], tot_loss[loss=5.345, NarTop10Accuracy=0.266, over 6007.51 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 14:35:22,456 INFO [trainer.py:765] (7/8) Epoch 1, batch 2300, train_loss[loss=5.259, NarTop10Accuracy=0.2727, over 5718.00 frames. ], tot_loss[loss=5.331, NarTop10Accuracy=0.2684, over 6013.97 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 14:35:46,816 INFO [trainer.py:765] (7/8) Epoch 1, batch 2400, train_loss[loss=5.347, NarTop10Accuracy=0.2648, over 5049.00 frames. ], tot_loss[loss=5.278, NarTop10Accuracy=0.2783, over 5761.50 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 14:36:10,408 INFO [trainer.py:765] (7/8) Epoch 1, batch 2500, train_loss[loss=4.847, NarTop10Accuracy=0.3618, over 5199.00 frames. ], tot_loss[loss=5.217, NarTop10Accuracy=0.2889, over 5448.35 frames. ], batch size: 7, lr: 2.84e-02 +2024-08-06 14:36:31,241 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 14:37:29,670 INFO [trainer.py:765] (7/8) Epoch 2, batch 100, train_loss[loss=4.949, NarTop10Accuracy=0.3522, over 7461.00 frames. ], tot_loss[loss=5.194, NarTop10Accuracy=0.2945, over 2368.21 frames. ], batch size: 32, lr: 2.77e-02 +2024-08-06 14:38:10,016 INFO [trainer.py:765] (7/8) Epoch 2, batch 200, train_loss[loss=5.033, NarTop10Accuracy=0.3297, over 6681.00 frames. ], tot_loss[loss=5.165, NarTop10Accuracy=0.2994, over 3849.30 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 14:38:38,299 INFO [trainer.py:765] (7/8) Epoch 2, batch 300, train_loss[loss=5.161, NarTop10Accuracy=0.3015, over 6945.00 frames. ], tot_loss[loss=5.144, NarTop10Accuracy=0.3021, over 4651.14 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 14:39:07,000 INFO [trainer.py:765] (7/8) Epoch 2, batch 400, train_loss[loss=4.947, NarTop10Accuracy=0.3375, over 5034.00 frames. ], tot_loss[loss=5.11, NarTop10Accuracy=0.3078, over 5106.76 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 14:39:46,121 INFO [trainer.py:765] (7/8) Epoch 2, batch 500, train_loss[loss=5.028, NarTop10Accuracy=0.326, over 6078.00 frames. ], tot_loss[loss=5.076, NarTop10Accuracy=0.3149, over 5381.89 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 14:40:15,084 INFO [trainer.py:765] (7/8) Epoch 2, batch 600, train_loss[loss=4.928, NarTop10Accuracy=0.342, over 5619.00 frames. ], tot_loss[loss=5.053, NarTop10Accuracy=0.3191, over 5653.02 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 14:40:44,590 INFO [trainer.py:765] (7/8) Epoch 2, batch 700, train_loss[loss=5.016, NarTop10Accuracy=0.3258, over 4920.00 frames. ], tot_loss[loss=5.035, NarTop10Accuracy=0.322, over 5720.62 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 14:41:24,515 INFO [trainer.py:765] (7/8) Epoch 2, batch 800, train_loss[loss=5.147, NarTop10Accuracy=0.2925, over 5196.00 frames. ], tot_loss[loss=5.021, NarTop10Accuracy=0.3245, over 5769.14 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 14:41:54,406 INFO [trainer.py:765] (7/8) Epoch 2, batch 900, train_loss[loss=4.723, NarTop10Accuracy=0.382, over 6552.00 frames. ], tot_loss[loss=4.98, NarTop10Accuracy=0.3326, over 5812.00 frames. ], batch size: 14, lr: 2.68e-02 +2024-08-06 14:42:23,903 INFO [trainer.py:765] (7/8) Epoch 2, batch 1000, train_loss[loss=4.744, NarTop10Accuracy=0.3803, over 6279.00 frames. ], tot_loss[loss=4.943, NarTop10Accuracy=0.3396, over 5902.87 frames. 
], batch size: 13, lr: 2.66e-02 +2024-08-06 14:42:56,256 INFO [trainer.py:765] (7/8) Epoch 2, batch 1100, train_loss[loss=5.004, NarTop10Accuracy=0.3297, over 6792.00 frames. ], tot_loss[loss=4.931, NarTop10Accuracy=0.3418, over 5936.72 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 14:43:35,187 INFO [trainer.py:765] (7/8) Epoch 2, batch 1200, train_loss[loss=4.809, NarTop10Accuracy=0.3634, over 7329.00 frames. ], tot_loss[loss=4.913, NarTop10Accuracy=0.3449, over 5931.35 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 14:44:04,348 INFO [trainer.py:765] (7/8) Epoch 2, batch 1300, train_loss[loss=4.874, NarTop10Accuracy=0.3499, over 5040.00 frames. ], tot_loss[loss=4.867, NarTop10Accuracy=0.3537, over 6000.91 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 14:44:33,728 INFO [trainer.py:765] (7/8) Epoch 2, batch 1400, train_loss[loss=4.878, NarTop10Accuracy=0.3497, over 6090.00 frames. ], tot_loss[loss=4.848, NarTop10Accuracy=0.3572, over 6011.51 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 14:44:40,442 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:44:48,506 INFO [trainer.py:811] (7/8) Epoch 2, validation: loss=4.808, NarTop10Accuracy=0.3642, over 1905321.00 frames. +2024-08-06 14:44:48,506 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26703MB +2024-08-06 14:44:49,204 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 6.328e+01 1.178e+02 1.410e+02 1.789e+02 6.269e+02, threshold=2.821e+02, percent-clipped=0.0 +2024-08-06 14:45:09,806 INFO [trainer.py:765] (7/8) Epoch 2, batch 1500, train_loss[loss=4.873, NarTop10Accuracy=0.3492, over 5802.00 frames. ], tot_loss[loss=4.826, NarTop10Accuracy=0.3616, over 5951.23 frames. ], batch size: 50, lr: 2.60e-02 +2024-08-06 14:45:37,659 INFO [trainer.py:765] (7/8) Epoch 2, batch 1600, train_loss[loss=4.719, NarTop10Accuracy=0.3809, over 7161.00 frames. ], tot_loss[loss=4.798, NarTop10Accuracy=0.3671, over 5933.45 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 14:46:04,368 INFO [trainer.py:765] (7/8) Epoch 2, batch 1700, train_loss[loss=4.798, NarTop10Accuracy=0.3643, over 6657.00 frames. ], tot_loss[loss=4.793, NarTop10Accuracy=0.3678, over 5918.01 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 14:46:31,034 INFO [trainer.py:765] (7/8) Epoch 2, batch 1800, train_loss[loss=4.672, NarTop10Accuracy=0.39, over 7017.00 frames. ], tot_loss[loss=4.771, NarTop10Accuracy=0.3722, over 5971.63 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 14:46:57,532 INFO [trainer.py:765] (7/8) Epoch 2, batch 1900, train_loss[loss=4.749, NarTop10Accuracy=0.3719, over 6399.00 frames. ], tot_loss[loss=4.743, NarTop10Accuracy=0.3779, over 6018.52 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 14:47:23,234 INFO [trainer.py:765] (7/8) Epoch 2, batch 2000, train_loss[loss=4.861, NarTop10Accuracy=0.362, over 5814.00 frames. ], tot_loss[loss=4.721, NarTop10Accuracy=0.3825, over 5995.37 frames. ], batch size: 50, lr: 2.54e-02 +2024-08-06 14:47:48,588 INFO [trainer.py:765] (7/8) Epoch 2, batch 2100, train_loss[loss=4.703, NarTop10Accuracy=0.3802, over 4101.00 frames. ], tot_loss[loss=4.711, NarTop10Accuracy=0.3841, over 5968.96 frames. ], batch size: 4, lr: 2.53e-02 +2024-08-06 14:48:13,765 INFO [trainer.py:765] (7/8) Epoch 2, batch 2200, train_loss[loss=4.628, NarTop10Accuracy=0.3973, over 7197.00 frames. ], tot_loss[loss=4.677, NarTop10Accuracy=0.3904, over 6003.98 frames. 
], batch size: 31, lr: 2.51e-02 +2024-08-06 14:48:38,951 INFO [trainer.py:765] (7/8) Epoch 2, batch 2300, train_loss[loss=4.781, NarTop10Accuracy=0.3589, over 5640.00 frames. ], tot_loss[loss=4.685, NarTop10Accuracy=0.3886, over 6021.34 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 14:49:03,319 INFO [trainer.py:765] (7/8) Epoch 2, batch 2400, train_loss[loss=4.532, NarTop10Accuracy=0.4153, over 5103.00 frames. ], tot_loss[loss=4.641, NarTop10Accuracy=0.3967, over 5765.71 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 14:49:26,867 INFO [trainer.py:765] (7/8) Epoch 2, batch 2500, train_loss[loss=4.678, NarTop10Accuracy=0.3882, over 5067.00 frames. ], tot_loss[loss=4.612, NarTop10Accuracy=0.4023, over 5475.98 frames. ], batch size: 7, lr: 2.48e-02 +2024-08-06 14:49:46,816 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 14:50:51,117 INFO [trainer.py:765] (7/8) Epoch 3, batch 100, train_loss[loss=4.718, NarTop10Accuracy=0.3759, over 7464.00 frames. ], tot_loss[loss=4.585, NarTop10Accuracy=0.4079, over 2368.24 frames. ], batch size: 31, lr: 2.36e-02 +2024-08-06 14:51:20,388 INFO [trainer.py:765] (7/8) Epoch 3, batch 200, train_loss[loss=4.751, NarTop10Accuracy=0.3717, over 6906.00 frames. ], tot_loss[loss=4.547, NarTop10Accuracy=0.4153, over 3860.36 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 14:51:50,953 INFO [trainer.py:765] (7/8) Epoch 3, batch 300, train_loss[loss=4.827, NarTop10Accuracy=0.3605, over 6906.00 frames. ], tot_loss[loss=4.521, NarTop10Accuracy=0.4205, over 4650.83 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 14:52:32,359 INFO [trainer.py:765] (7/8) Epoch 3, batch 400, train_loss[loss=4.758, NarTop10Accuracy=0.3733, over 5100.00 frames. ], tot_loss[loss=4.504, NarTop10Accuracy=0.4238, over 5089.34 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 14:53:00,680 INFO [trainer.py:765] (7/8) Epoch 3, batch 500, train_loss[loss=4.335, NarTop10Accuracy=0.4548, over 6504.00 frames. ], tot_loss[loss=4.495, NarTop10Accuracy=0.4256, over 5382.54 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 14:53:29,551 INFO [trainer.py:765] (7/8) Epoch 3, batch 600, train_loss[loss=4.186, NarTop10Accuracy=0.4943, over 5646.00 frames. ], tot_loss[loss=4.477, NarTop10Accuracy=0.4296, over 5649.23 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 14:54:12,465 INFO [trainer.py:765] (7/8) Epoch 3, batch 700, train_loss[loss=4.455, NarTop10Accuracy=0.4348, over 4269.00 frames. ], tot_loss[loss=4.45, NarTop10Accuracy=0.4346, over 5721.19 frames. ], batch size: 5, lr: 2.29e-02 +2024-08-06 14:54:44,785 INFO [trainer.py:765] (7/8) Epoch 3, batch 800, train_loss[loss=4.282, NarTop10Accuracy=0.467, over 5262.00 frames. ], tot_loss[loss=4.426, NarTop10Accuracy=0.4397, over 5774.01 frames. ], batch size: 6, lr: 2.28e-02 +2024-08-06 14:54:58,684 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:55:06,655 INFO [trainer.py:811] (7/8) Epoch 3, validation: loss=4.276, NarTop10Accuracy=0.4689, over 1905321.00 frames. +2024-08-06 14:55:06,656 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26703MB +2024-08-06 14:55:07,183 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 8.443e+01 1.396e+02 1.639e+02 2.017e+02 7.124e+02, threshold=3.277e+02, percent-clipped=4.5 +2024-08-06 14:55:21,052 INFO [trainer.py:765] (7/8) Epoch 3, batch 900, train_loss[loss=4.25, NarTop10Accuracy=0.4737, over 6186.00 frames. ], tot_loss[loss=4.399, NarTop10Accuracy=0.4454, over 5787.96 frames. 
], batch size: 13, lr: 2.26e-02 +2024-08-06 14:56:04,958 INFO [trainer.py:765] (7/8) Epoch 3, batch 1000, train_loss[loss=4.297, NarTop10Accuracy=0.4703, over 6312.00 frames. ], tot_loss[loss=4.379, NarTop10Accuracy=0.4491, over 5888.54 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 14:56:37,300 INFO [trainer.py:765] (7/8) Epoch 3, batch 1100, train_loss[loss=4.542, NarTop10Accuracy=0.4187, over 6738.00 frames. ], tot_loss[loss=4.354, NarTop10Accuracy=0.4538, over 5928.27 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 14:57:06,377 INFO [trainer.py:765] (7/8) Epoch 3, batch 1200, train_loss[loss=4.429, NarTop10Accuracy=0.4404, over 7449.00 frames. ], tot_loss[loss=4.337, NarTop10Accuracy=0.4566, over 5923.02 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 14:57:51,630 INFO [trainer.py:765] (7/8) Epoch 3, batch 1300, train_loss[loss=4.304, NarTop10Accuracy=0.4529, over 5055.00 frames. ], tot_loss[loss=4.308, NarTop10Accuracy=0.4621, over 5985.69 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 14:58:22,899 INFO [trainer.py:765] (7/8) Epoch 3, batch 1400, train_loss[loss=4.16, NarTop10Accuracy=0.496, over 6102.00 frames. ], tot_loss[loss=4.297, NarTop10Accuracy=0.4642, over 6010.56 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 14:58:50,855 INFO [trainer.py:765] (7/8) Epoch 3, batch 1500, train_loss[loss=4.293, NarTop10Accuracy=0.4642, over 6270.00 frames. ], tot_loss[loss=4.278, NarTop10Accuracy=0.4675, over 5962.08 frames. ], batch size: 51, lr: 2.20e-02 +2024-08-06 14:59:18,715 INFO [trainer.py:765] (7/8) Epoch 3, batch 1600, train_loss[loss=4.158, NarTop10Accuracy=0.4883, over 7050.00 frames. ], tot_loss[loss=4.254, NarTop10Accuracy=0.4722, over 5947.29 frames. ], batch size: 23, lr: 2.19e-02 +2024-08-06 14:59:45,952 INFO [trainer.py:765] (7/8) Epoch 3, batch 1700, train_loss[loss=4.121, NarTop10Accuracy=0.5042, over 6747.00 frames. ], tot_loss[loss=4.228, NarTop10Accuracy=0.4774, over 5950.28 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 15:00:12,498 INFO [trainer.py:765] (7/8) Epoch 3, batch 1800, train_loss[loss=3.952, NarTop10Accuracy=0.5365, over 6996.00 frames. ], tot_loss[loss=4.207, NarTop10Accuracy=0.4816, over 5996.67 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 15:00:38,949 INFO [trainer.py:765] (7/8) Epoch 3, batch 1900, train_loss[loss=4.696, NarTop10Accuracy=0.3814, over 5850.00 frames. ], tot_loss[loss=4.194, NarTop10Accuracy=0.4848, over 6034.34 frames. ], batch size: 51, lr: 2.16e-02 +2024-08-06 15:01:04,606 INFO [trainer.py:765] (7/8) Epoch 3, batch 2000, train_loss[loss=4.539, NarTop10Accuracy=0.4137, over 6354.00 frames. ], tot_loss[loss=4.172, NarTop10Accuracy=0.4895, over 6011.68 frames. ], batch size: 50, lr: 2.15e-02 +2024-08-06 15:01:29,898 INFO [trainer.py:765] (7/8) Epoch 3, batch 2100, train_loss[loss=3.732, NarTop10Accuracy=0.5744, over 4746.00 frames. ], tot_loss[loss=4.148, NarTop10Accuracy=0.494, over 5976.48 frames. ], batch size: 5, lr: 2.14e-02 +2024-08-06 15:01:55,182 INFO [trainer.py:765] (7/8) Epoch 3, batch 2200, train_loss[loss=4.032, NarTop10Accuracy=0.5145, over 7209.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.5009, over 6012.76 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 15:02:20,410 INFO [trainer.py:765] (7/8) Epoch 3, batch 2300, train_loss[loss=4.282, NarTop10Accuracy=0.4601, over 5613.00 frames. ], tot_loss[loss=4.128, NarTop10Accuracy=0.4985, over 6034.86 frames. 
], batch size: 9, lr: 2.12e-02 +2024-08-06 15:02:44,663 INFO [trainer.py:765] (7/8) Epoch 3, batch 2400, train_loss[loss=4.15, NarTop10Accuracy=0.485, over 5097.00 frames. ], tot_loss[loss=4.095, NarTop10Accuracy=0.5051, over 5767.93 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 15:03:08,234 INFO [trainer.py:765] (7/8) Epoch 3, batch 2500, train_loss[loss=3.706, NarTop10Accuracy=0.5856, over 5820.00 frames. ], tot_loss[loss=4.036, NarTop10Accuracy=0.5168, over 5483.00 frames. ], batch size: 8, lr: 2.10e-02 +2024-08-06 15:03:28,174 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:04:28,130 INFO [trainer.py:765] (7/8) Epoch 4, batch 100, train_loss[loss=3.927, NarTop10Accuracy=0.5392, over 7353.00 frames. ], tot_loss[loss=4.026, NarTop10Accuracy=0.5193, over 2364.79 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 15:04:59,842 INFO [trainer.py:765] (7/8) Epoch 4, batch 200, train_loss[loss=3.931, NarTop10Accuracy=0.536, over 6729.00 frames. ], tot_loss[loss=4.001, NarTop10Accuracy=0.5249, over 3861.09 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 15:05:27,509 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:05:35,694 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=3.804, NarTop10Accuracy=0.5644, over 1905321.00 frames. +2024-08-06 15:05:35,695 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26785MB +2024-08-06 15:05:36,238 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.166e+02 1.765e+02 1.975e+02 2.270e+02 5.852e+02, threshold=3.949e+02, percent-clipped=2.8 +2024-08-06 15:05:43,889 INFO [trainer.py:765] (7/8) Epoch 4, batch 300, train_loss[loss=3.744, NarTop10Accuracy=0.5823, over 7143.00 frames. ], tot_loss[loss=3.996, NarTop10Accuracy=0.5255, over 4650.09 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 15:06:16,124 INFO [trainer.py:765] (7/8) Epoch 4, batch 400, train_loss[loss=3.818, NarTop10Accuracy=0.5576, over 5157.00 frames. ], tot_loss[loss=4.01, NarTop10Accuracy=0.5227, over 5098.02 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 15:06:46,473 INFO [trainer.py:765] (7/8) Epoch 4, batch 500, train_loss[loss=4.156, NarTop10Accuracy=0.49, over 6168.00 frames. ], tot_loss[loss=3.985, NarTop10Accuracy=0.5274, over 5371.29 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 15:07:23,817 INFO [trainer.py:765] (7/8) Epoch 4, batch 600, train_loss[loss=3.71, NarTop10Accuracy=0.5924, over 5796.00 frames. ], tot_loss[loss=3.984, NarTop10Accuracy=0.5282, over 5646.29 frames. ], batch size: 9, lr: 1.93e-02 +2024-08-06 15:07:59,001 INFO [trainer.py:765] (7/8) Epoch 4, batch 700, train_loss[loss=4.104, NarTop10Accuracy=0.5056, over 5031.00 frames. ], tot_loss[loss=3.973, NarTop10Accuracy=0.5305, over 5721.10 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 15:08:32,429 INFO [trainer.py:765] (7/8) Epoch 4, batch 800, train_loss[loss=3.667, NarTop10Accuracy=0.5867, over 4251.00 frames. ], tot_loss[loss=3.963, NarTop10Accuracy=0.5322, over 5759.15 frames. ], batch size: 5, lr: 1.91e-02 +2024-08-06 15:09:10,689 INFO [trainer.py:765] (7/8) Epoch 4, batch 900, train_loss[loss=3.577, NarTop10Accuracy=0.6132, over 6744.00 frames. ], tot_loss[loss=3.925, NarTop10Accuracy=0.5404, over 5779.11 frames. ], batch size: 14, lr: 1.90e-02 +2024-08-06 15:09:46,076 INFO [trainer.py:765] (7/8) Epoch 4, batch 1000, train_loss[loss=3.641, NarTop10Accuracy=0.6031, over 6567.00 frames. ], tot_loss[loss=3.912, NarTop10Accuracy=0.5427, over 5892.94 frames. 
], batch size: 14, lr: 1.89e-02 +2024-08-06 15:10:18,139 INFO [trainer.py:765] (7/8) Epoch 4, batch 1100, train_loss[loss=3.795, NarTop10Accuracy=0.5656, over 6909.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5439, over 5923.38 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 15:10:55,075 INFO [trainer.py:765] (7/8) Epoch 4, batch 1200, train_loss[loss=4.307, NarTop10Accuracy=0.4696, over 7392.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5434, over 5922.54 frames. ], batch size: 32, lr: 1.88e-02 +2024-08-06 15:11:32,074 INFO [trainer.py:765] (7/8) Epoch 4, batch 1300, train_loss[loss=3.558, NarTop10Accuracy=0.6132, over 5130.00 frames. ], tot_loss[loss=3.863, NarTop10Accuracy=0.5519, over 5989.00 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 15:12:05,688 INFO [trainer.py:765] (7/8) Epoch 4, batch 1400, train_loss[loss=3.725, NarTop10Accuracy=0.5846, over 6153.00 frames. ], tot_loss[loss=3.865, NarTop10Accuracy=0.5516, over 6009.16 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 15:12:33,695 INFO [trainer.py:765] (7/8) Epoch 4, batch 1500, train_loss[loss=3.871, NarTop10Accuracy=0.5519, over 6504.00 frames. ], tot_loss[loss=3.863, NarTop10Accuracy=0.5521, over 5951.62 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 15:13:01,510 INFO [trainer.py:765] (7/8) Epoch 4, batch 1600, train_loss[loss=3.777, NarTop10Accuracy=0.5704, over 6957.00 frames. ], tot_loss[loss=3.849, NarTop10Accuracy=0.5547, over 5933.40 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 15:13:28,132 INFO [trainer.py:765] (7/8) Epoch 4, batch 1700, train_loss[loss=3.74, NarTop10Accuracy=0.5869, over 6681.00 frames. ], tot_loss[loss=3.823, NarTop10Accuracy=0.56, over 5909.51 frames. ], batch size: 14, lr: 1.84e-02 +2024-08-06 15:13:54,557 INFO [trainer.py:765] (7/8) Epoch 4, batch 1800, train_loss[loss=3.84, NarTop10Accuracy=0.5541, over 7113.00 frames. ], tot_loss[loss=3.828, NarTop10Accuracy=0.5589, over 5983.18 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 15:14:20,998 INFO [trainer.py:765] (7/8) Epoch 4, batch 1900, train_loss[loss=3.893, NarTop10Accuracy=0.5435, over 5700.00 frames. ], tot_loss[loss=3.851, NarTop10Accuracy=0.5542, over 6024.03 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 15:14:46,671 INFO [trainer.py:765] (7/8) Epoch 4, batch 2000, train_loss[loss=3.794, NarTop10Accuracy=0.5671, over 6126.00 frames. ], tot_loss[loss=3.826, NarTop10Accuracy=0.5592, over 6001.90 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 15:15:11,859 INFO [trainer.py:765] (7/8) Epoch 4, batch 2100, train_loss[loss=3.56, NarTop10Accuracy=0.6139, over 4893.00 frames. ], tot_loss[loss=3.809, NarTop10Accuracy=0.563, over 5971.42 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 15:15:37,089 INFO [trainer.py:765] (7/8) Epoch 4, batch 2200, train_loss[loss=3.638, NarTop10Accuracy=0.6055, over 7227.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5635, over 6013.24 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 15:15:55,089 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:16:03,243 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=3.665, NarTop10Accuracy=0.5912, over 1905321.00 frames. 
+2024-08-06 15:16:03,243 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26785MB +2024-08-06 15:16:03,741 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.414e+02 1.889e+02 2.096e+02 2.369e+02 1.168e+03, threshold=4.192e+02, percent-clipped=1.7 +2024-08-06 15:16:10,347 INFO [trainer.py:765] (7/8) Epoch 4, batch 2300, train_loss[loss=3.668, NarTop10Accuracy=0.5994, over 5550.00 frames. ], tot_loss[loss=3.82, NarTop10Accuracy=0.5607, over 6027.18 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 15:16:34,841 INFO [trainer.py:765] (7/8) Epoch 4, batch 2400, train_loss[loss=3.532, NarTop10Accuracy=0.621, over 5166.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5676, over 5772.52 frames. ], batch size: 7, lr: 1.79e-02 +2024-08-06 15:16:58,535 INFO [trainer.py:765] (7/8) Epoch 4, batch 2500, train_loss[loss=3.633, NarTop10Accuracy=0.6096, over 5097.00 frames. ], tot_loss[loss=3.773, NarTop10Accuracy=0.5702, over 5474.48 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 15:17:18,638 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:18:24,100 INFO [trainer.py:765] (7/8) Epoch 5, batch 100, train_loss[loss=3.498, NarTop10Accuracy=0.6302, over 7143.00 frames. ], tot_loss[loss=3.771, NarTop10Accuracy=0.5701, over 2363.87 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 15:18:59,675 INFO [trainer.py:765] (7/8) Epoch 5, batch 200, train_loss[loss=4.205, NarTop10Accuracy=0.4892, over 6942.00 frames. ], tot_loss[loss=3.758, NarTop10Accuracy=0.5735, over 3869.10 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 15:19:32,887 INFO [trainer.py:765] (7/8) Epoch 5, batch 300, train_loss[loss=3.948, NarTop10Accuracy=0.5282, over 7209.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5779, over 4659.35 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 15:20:01,655 INFO [trainer.py:765] (7/8) Epoch 5, batch 400, train_loss[loss=3.558, NarTop10Accuracy=0.6101, over 5094.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5794, over 5112.63 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 15:20:38,298 INFO [trainer.py:765] (7/8) Epoch 5, batch 500, train_loss[loss=3.993, NarTop10Accuracy=0.5197, over 6051.00 frames. ], tot_loss[loss=3.73, NarTop10Accuracy=0.578, over 5376.42 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 15:21:13,710 INFO [trainer.py:765] (7/8) Epoch 5, batch 600, train_loss[loss=3.73, NarTop10Accuracy=0.5663, over 5745.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5796, over 5653.77 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 15:21:45,881 INFO [trainer.py:765] (7/8) Epoch 5, batch 700, train_loss[loss=3.599, NarTop10Accuracy=0.6105, over 5127.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5806, over 5724.83 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:24,498 INFO [trainer.py:765] (7/8) Epoch 5, batch 800, train_loss[loss=3.929, NarTop10Accuracy=0.5297, over 4992.00 frames. ], tot_loss[loss=3.705, NarTop10Accuracy=0.5838, over 5786.73 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 15:22:56,783 INFO [trainer.py:765] (7/8) Epoch 5, batch 900, train_loss[loss=3.708, NarTop10Accuracy=0.5849, over 6663.00 frames. ], tot_loss[loss=3.694, NarTop10Accuracy=0.5859, over 5810.10 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 15:23:31,914 INFO [trainer.py:765] (7/8) Epoch 5, batch 1000, train_loss[loss=3.579, NarTop10Accuracy=0.6171, over 6246.00 frames. ], tot_loss[loss=3.685, NarTop10Accuracy=0.5878, over 5911.18 frames. 
], batch size: 13, lr: 1.60e-02 +2024-08-06 15:24:09,571 INFO [trainer.py:765] (7/8) Epoch 5, batch 1100, train_loss[loss=3.513, NarTop10Accuracy=0.6277, over 6876.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5894, over 5946.48 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 15:24:44,528 INFO [trainer.py:765] (7/8) Epoch 5, batch 1200, train_loss[loss=3.497, NarTop10Accuracy=0.6227, over 7305.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.59, over 5950.72 frames. ], batch size: 31, lr: 1.59e-02 +2024-08-06 15:25:19,379 INFO [trainer.py:765] (7/8) Epoch 5, batch 1300, train_loss[loss=3.813, NarTop10Accuracy=0.556, over 4305.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5931, over 6004.84 frames. ], batch size: 5, lr: 1.59e-02 +2024-08-06 15:25:51,694 INFO [trainer.py:765] (7/8) Epoch 5, batch 1400, train_loss[loss=3.721, NarTop10Accuracy=0.5731, over 6033.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5913, over 6019.79 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 15:26:26,195 INFO [trainer.py:765] (7/8) Epoch 5, batch 1500, train_loss[loss=3.712, NarTop10Accuracy=0.5908, over 6423.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5922, over 5940.01 frames. ], batch size: 50, lr: 1.58e-02 +2024-08-06 15:26:54,130 INFO [trainer.py:765] (7/8) Epoch 5, batch 1600, train_loss[loss=3.525, NarTop10Accuracy=0.6237, over 7053.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.59, over 5924.97 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 15:27:19,603 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:27:27,821 INFO [trainer.py:811] (7/8) Epoch 5, validation: loss=3.552, NarTop10Accuracy=0.6147, over 1905321.00 frames. +2024-08-06 15:27:27,822 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27149MB +2024-08-06 15:27:28,341 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 1.756e+02 1.962e+02 2.205e+02 5.880e+02, threshold=3.924e+02, percent-clipped=0.8 +2024-08-06 15:27:29,131 INFO [trainer.py:765] (7/8) Epoch 5, batch 1700, train_loss[loss=3.711, NarTop10Accuracy=0.5779, over 6516.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5913, over 5914.84 frames. ], batch size: 14, lr: 1.56e-02 +2024-08-06 15:27:55,653 INFO [trainer.py:765] (7/8) Epoch 5, batch 1800, train_loss[loss=3.85, NarTop10Accuracy=0.5517, over 6945.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5919, over 5985.80 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 15:28:22,172 INFO [trainer.py:765] (7/8) Epoch 5, batch 1900, train_loss[loss=3.861, NarTop10Accuracy=0.5582, over 6030.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5901, over 6035.88 frames. ], batch size: 53, lr: 1.55e-02 +2024-08-06 15:28:47,893 INFO [trainer.py:765] (7/8) Epoch 5, batch 2000, train_loss[loss=3.679, NarTop10Accuracy=0.593, over 6087.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5911, over 6020.19 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 15:29:13,770 INFO [trainer.py:765] (7/8) Epoch 5, batch 2100, train_loss[loss=3.382, NarTop10Accuracy=0.6575, over 4839.00 frames. ], tot_loss[loss=3.688, NarTop10Accuracy=0.5874, over 6001.31 frames. ], batch size: 5, lr: 1.54e-02 +2024-08-06 15:29:39,177 INFO [trainer.py:765] (7/8) Epoch 5, batch 2200, train_loss[loss=4.039, NarTop10Accuracy=0.5124, over 7101.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5923, over 6013.35 frames. 
], batch size: 31, lr: 1.54e-02 +2024-08-06 15:30:04,430 INFO [trainer.py:765] (7/8) Epoch 5, batch 2300, train_loss[loss=3.526, NarTop10Accuracy=0.6265, over 5727.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5902, over 6032.12 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 15:30:28,862 INFO [trainer.py:765] (7/8) Epoch 5, batch 2400, train_loss[loss=3.42, NarTop10Accuracy=0.647, over 5184.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.5955, over 5774.66 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 15:30:52,503 INFO [trainer.py:765] (7/8) Epoch 5, batch 2500, train_loss[loss=3.31, NarTop10Accuracy=0.665, over 5127.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.6039, over 5477.97 frames. ], batch size: 7, lr: 1.52e-02 +2024-08-06 15:31:12,586 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:32:14,415 INFO [trainer.py:765] (7/8) Epoch 6, batch 100, train_loss[loss=3.568, NarTop10Accuracy=0.6134, over 7119.00 frames. ], tot_loss[loss=3.627, NarTop10Accuracy=0.6001, over 2372.64 frames. ], batch size: 31, lr: 1.42e-02 +2024-08-06 15:32:46,016 INFO [trainer.py:765] (7/8) Epoch 6, batch 200, train_loss[loss=4.041, NarTop10Accuracy=0.5128, over 6678.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.6025, over 3871.57 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 15:33:21,243 INFO [trainer.py:765] (7/8) Epoch 6, batch 300, train_loss[loss=3.499, NarTop10Accuracy=0.6288, over 6948.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.6037, over 4664.07 frames. ], batch size: 22, lr: 1.41e-02 +2024-08-06 15:33:56,035 INFO [trainer.py:765] (7/8) Epoch 6, batch 400, train_loss[loss=3.514, NarTop10Accuracy=0.627, over 5052.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6068, over 5111.94 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 15:34:26,759 INFO [trainer.py:765] (7/8) Epoch 6, batch 500, train_loss[loss=3.285, NarTop10Accuracy=0.6645, over 6138.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6092, over 5386.00 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 15:35:01,458 INFO [trainer.py:765] (7/8) Epoch 6, batch 600, train_loss[loss=3.228, NarTop10Accuracy=0.6753, over 5658.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6088, over 5656.36 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 15:35:32,734 INFO [trainer.py:765] (7/8) Epoch 6, batch 700, train_loss[loss=3.307, NarTop10Accuracy=0.6699, over 4305.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6089, over 5726.69 frames. ], batch size: 5, lr: 1.39e-02 +2024-08-06 15:36:06,844 INFO [trainer.py:765] (7/8) Epoch 6, batch 800, train_loss[loss=3.837, NarTop10Accuracy=0.5524, over 5184.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.6076, over 5772.62 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 15:36:40,385 INFO [trainer.py:765] (7/8) Epoch 6, batch 900, train_loss[loss=3.964, NarTop10Accuracy=0.5266, over 6597.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6095, over 5809.53 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:15,272 INFO [trainer.py:765] (7/8) Epoch 6, batch 1000, train_loss[loss=3.386, NarTop10Accuracy=0.6538, over 6702.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.6066, over 5913.69 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 15:37:50,508 INFO [trainer.py:765] (7/8) Epoch 6, batch 1100, train_loss[loss=3.328, NarTop10Accuracy=0.6662, over 6852.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.6074, over 5941.16 frames. 
], batch size: 17, lr: 1.38e-02 +2024-08-06 15:37:55,829 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:38:04,436 INFO [trainer.py:811] (7/8) Epoch 6, validation: loss=3.421, NarTop10Accuracy=0.6418, over 1905321.00 frames. +2024-08-06 15:38:04,437 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27149MB +2024-08-06 15:38:04,965 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 1.809e+02 1.991e+02 2.234e+02 5.215e+02, threshold=3.983e+02, percent-clipped=0.5 +2024-08-06 15:38:36,168 INFO [trainer.py:765] (7/8) Epoch 6, batch 1200, train_loss[loss=3.387, NarTop10Accuracy=0.6469, over 7329.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6095, over 5935.24 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 15:39:08,242 INFO [trainer.py:765] (7/8) Epoch 6, batch 1300, train_loss[loss=3.373, NarTop10Accuracy=0.6515, over 5031.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6102, over 6000.52 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 15:39:44,069 INFO [trainer.py:765] (7/8) Epoch 6, batch 1400, train_loss[loss=3.309, NarTop10Accuracy=0.6549, over 6102.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6108, over 6034.41 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 15:40:15,383 INFO [trainer.py:765] (7/8) Epoch 6, batch 1500, train_loss[loss=3.989, NarTop10Accuracy=0.5371, over 6018.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6114, over 5967.98 frames. ], batch size: 50, lr: 1.36e-02 +2024-08-06 15:40:43,105 INFO [trainer.py:765] (7/8) Epoch 6, batch 1600, train_loss[loss=3.549, NarTop10Accuracy=0.6201, over 7179.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6109, over 5954.16 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 15:41:09,788 INFO [trainer.py:765] (7/8) Epoch 6, batch 1700, train_loss[loss=3.55, NarTop10Accuracy=0.6164, over 6708.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6142, over 5940.03 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 15:41:36,317 INFO [trainer.py:765] (7/8) Epoch 6, batch 1800, train_loss[loss=3.513, NarTop10Accuracy=0.6264, over 7212.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6129, over 5984.40 frames. ], batch size: 23, lr: 1.35e-02 +2024-08-06 15:42:02,720 INFO [trainer.py:765] (7/8) Epoch 6, batch 1900, train_loss[loss=3.905, NarTop10Accuracy=0.5398, over 6222.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.609, over 6019.04 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 15:42:28,319 INFO [trainer.py:765] (7/8) Epoch 6, batch 2000, train_loss[loss=3.454, NarTop10Accuracy=0.6348, over 5904.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6103, over 6001.97 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 15:42:53,668 INFO [trainer.py:765] (7/8) Epoch 6, batch 2100, train_loss[loss=3.06, NarTop10Accuracy=0.7183, over 3843.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6125, over 5966.34 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 15:43:18,977 INFO [trainer.py:765] (7/8) Epoch 6, batch 2200, train_loss[loss=3.804, NarTop10Accuracy=0.5603, over 7215.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6123, over 6009.95 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 15:43:44,106 INFO [trainer.py:765] (7/8) Epoch 6, batch 2300, train_loss[loss=3.431, NarTop10Accuracy=0.6364, over 5754.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6132, over 6028.19 frames. 
], batch size: 9, lr: 1.33e-02 +2024-08-06 15:44:08,620 INFO [trainer.py:765] (7/8) Epoch 6, batch 2400, train_loss[loss=3.24, NarTop10Accuracy=0.6702, over 5091.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6179, over 5776.22 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:32,132 INFO [trainer.py:765] (7/8) Epoch 6, batch 2500, train_loss[loss=3.33, NarTop10Accuracy=0.6548, over 5292.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6208, over 5482.82 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 15:44:51,850 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:45:58,043 INFO [trainer.py:765] (7/8) Epoch 7, batch 100, train_loss[loss=3.321, NarTop10Accuracy=0.66, over 7185.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6184, over 2353.99 frames. ], batch size: 32, lr: 1.24e-02 +2024-08-06 15:46:33,614 INFO [trainer.py:765] (7/8) Epoch 7, batch 200, train_loss[loss=3.422, NarTop10Accuracy=0.6411, over 6807.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6212, over 3841.06 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 15:47:03,246 INFO [trainer.py:765] (7/8) Epoch 7, batch 300, train_loss[loss=3.86, NarTop10Accuracy=0.5528, over 7176.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6177, over 4638.91 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 15:47:34,495 INFO [trainer.py:765] (7/8) Epoch 7, batch 400, train_loss[loss=3.502, NarTop10Accuracy=0.6179, over 5091.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6198, over 5095.27 frames. ], batch size: 7, lr: 1.23e-02 +2024-08-06 15:48:13,730 INFO [trainer.py:765] (7/8) Epoch 7, batch 500, train_loss[loss=3.663, NarTop10Accuracy=0.591, over 6018.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6208, over 5362.79 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 15:48:26,369 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:48:34,533 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=3.326, NarTop10Accuracy=0.6612, over 1905321.00 frames. +2024-08-06 15:48:34,534 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27149MB +2024-08-06 15:48:35,079 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.860e+02 2.018e+02 2.241e+02 5.111e+02, threshold=4.035e+02, percent-clipped=0.3 +2024-08-06 15:48:52,720 INFO [trainer.py:765] (7/8) Epoch 7, batch 600, train_loss[loss=3.251, NarTop10Accuracy=0.6802, over 5721.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6202, over 5633.23 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 15:49:24,913 INFO [trainer.py:765] (7/8) Epoch 7, batch 700, train_loss[loss=3.846, NarTop10Accuracy=0.5487, over 5019.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6212, over 5688.11 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:04,381 INFO [trainer.py:765] (7/8) Epoch 7, batch 800, train_loss[loss=3.151, NarTop10Accuracy=0.6989, over 5070.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6249, over 5760.99 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 15:50:34,549 INFO [trainer.py:765] (7/8) Epoch 7, batch 900, train_loss[loss=3.324, NarTop10Accuracy=0.6551, over 6717.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6267, over 5788.01 frames. ], batch size: 14, lr: 1.21e-02 +2024-08-06 15:51:07,156 INFO [trainer.py:765] (7/8) Epoch 7, batch 1000, train_loss[loss=3.274, NarTop10Accuracy=0.6777, over 6618.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6274, over 5888.15 frames. 
], batch size: 14, lr: 1.20e-02 +2024-08-06 15:51:51,758 INFO [trainer.py:765] (7/8) Epoch 7, batch 1100, train_loss[loss=3.288, NarTop10Accuracy=0.6714, over 6837.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6265, over 5937.00 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 15:52:22,700 INFO [trainer.py:765] (7/8) Epoch 7, batch 1200, train_loss[loss=3.315, NarTop10Accuracy=0.6598, over 7314.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6276, over 5923.06 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 15:52:52,008 INFO [trainer.py:765] (7/8) Epoch 7, batch 1300, train_loss[loss=3.499, NarTop10Accuracy=0.6276, over 4890.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6273, over 5993.87 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 15:53:33,842 INFO [trainer.py:765] (7/8) Epoch 7, batch 1400, train_loss[loss=3.302, NarTop10Accuracy=0.6708, over 6087.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6244, over 6013.93 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 15:54:04,600 INFO [trainer.py:765] (7/8) Epoch 7, batch 1500, train_loss[loss=3.789, NarTop10Accuracy=0.5631, over 6171.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6286, over 5951.81 frames. ], batch size: 50, lr: 1.19e-02 +2024-08-06 15:54:32,385 INFO [trainer.py:765] (7/8) Epoch 7, batch 1600, train_loss[loss=3.594, NarTop10Accuracy=0.6044, over 7032.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6282, over 5920.57 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 15:54:59,055 INFO [trainer.py:765] (7/8) Epoch 7, batch 1700, train_loss[loss=3.656, NarTop10Accuracy=0.5901, over 6252.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6242, over 5920.10 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 15:55:25,513 INFO [trainer.py:765] (7/8) Epoch 7, batch 1800, train_loss[loss=4.025, NarTop10Accuracy=0.5231, over 7176.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6257, over 5983.61 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 15:55:52,083 INFO [trainer.py:765] (7/8) Epoch 7, batch 1900, train_loss[loss=3.351, NarTop10Accuracy=0.6528, over 6186.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6229, over 6027.70 frames. ], batch size: 54, lr: 1.18e-02 +2024-08-06 15:56:17,592 INFO [trainer.py:765] (7/8) Epoch 7, batch 2000, train_loss[loss=3.789, NarTop10Accuracy=0.5633, over 6033.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.624, over 5990.23 frames. ], batch size: 50, lr: 1.17e-02 +2024-08-06 15:56:42,856 INFO [trainer.py:765] (7/8) Epoch 7, batch 2100, train_loss[loss=3.704, NarTop10Accuracy=0.5812, over 4857.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6278, over 5974.93 frames. ], batch size: 5, lr: 1.17e-02 +2024-08-06 15:57:08,079 INFO [trainer.py:765] (7/8) Epoch 7, batch 2200, train_loss[loss=3.494, NarTop10Accuracy=0.6249, over 7134.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6239, over 6013.48 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 15:57:33,178 INFO [trainer.py:765] (7/8) Epoch 7, batch 2300, train_loss[loss=3.22, NarTop10Accuracy=0.6858, over 5652.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6237, over 6021.57 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 15:57:57,619 INFO [trainer.py:765] (7/8) Epoch 7, batch 2400, train_loss[loss=3.213, NarTop10Accuracy=0.6848, over 5154.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6262, over 5785.10 frames. 
], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:21,088 INFO [trainer.py:765] (7/8) Epoch 7, batch 2500, train_loss[loss=3.588, NarTop10Accuracy=0.5998, over 5133.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6317, over 5492.87 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 15:58:31,565 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:58:39,769 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=3.381, NarTop10Accuracy=0.6488, over 1905321.00 frames. +2024-08-06 15:58:39,770 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27149MB +2024-08-06 15:58:40,221 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.831e+02 1.996e+02 2.207e+02 5.229e+02, threshold=3.992e+02, percent-clipped=0.2 +2024-08-06 15:58:48,984 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:59:52,876 INFO [trainer.py:765] (7/8) Epoch 8, batch 100, train_loss[loss=3.686, NarTop10Accuracy=0.5803, over 7323.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6329, over 2369.09 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 16:00:27,881 INFO [trainer.py:765] (7/8) Epoch 8, batch 200, train_loss[loss=3.377, NarTop10Accuracy=0.6479, over 6741.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6284, over 3881.24 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 16:00:58,563 INFO [trainer.py:765] (7/8) Epoch 8, batch 300, train_loss[loss=3.326, NarTop10Accuracy=0.6521, over 7080.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6318, over 4679.84 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 16:01:29,760 INFO [trainer.py:765] (7/8) Epoch 8, batch 400, train_loss[loss=3.73, NarTop10Accuracy=0.5732, over 5235.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.631, over 5100.91 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 16:02:04,066 INFO [trainer.py:765] (7/8) Epoch 8, batch 500, train_loss[loss=3.78, NarTop10Accuracy=0.5647, over 6093.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6356, over 5393.60 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 16:02:41,836 INFO [trainer.py:765] (7/8) Epoch 8, batch 600, train_loss[loss=3.266, NarTop10Accuracy=0.6759, over 5739.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6319, over 5660.96 frames. ], batch size: 9, lr: 1.08e-02 +2024-08-06 16:03:11,500 INFO [trainer.py:765] (7/8) Epoch 8, batch 700, train_loss[loss=3.927, NarTop10Accuracy=0.5281, over 4344.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6304, over 5713.02 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 16:03:50,084 INFO [trainer.py:765] (7/8) Epoch 8, batch 800, train_loss[loss=3.432, NarTop10Accuracy=0.6358, over 4272.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.632, over 5768.11 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 16:04:27,588 INFO [trainer.py:765] (7/8) Epoch 8, batch 900, train_loss[loss=3.261, NarTop10Accuracy=0.6804, over 6609.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.636, over 5795.25 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 16:04:57,466 INFO [trainer.py:765] (7/8) Epoch 8, batch 1000, train_loss[loss=3.65, NarTop10Accuracy=0.5951, over 6159.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6375, over 5900.88 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 16:05:37,294 INFO [trainer.py:765] (7/8) Epoch 8, batch 1100, train_loss[loss=3.718, NarTop10Accuracy=0.5864, over 6846.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6393, over 5927.97 frames. 
], batch size: 17, lr: 1.06e-02 +2024-08-06 16:06:15,859 INFO [trainer.py:765] (7/8) Epoch 8, batch 1200, train_loss[loss=3.401, NarTop10Accuracy=0.6378, over 7182.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6366, over 5926.39 frames. ], batch size: 32, lr: 1.06e-02 +2024-08-06 16:06:45,187 INFO [trainer.py:765] (7/8) Epoch 8, batch 1300, train_loss[loss=3.212, NarTop10Accuracy=0.6889, over 4365.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.638, over 5995.79 frames. ], batch size: 5, lr: 1.06e-02 +2024-08-06 16:07:24,235 INFO [trainer.py:765] (7/8) Epoch 8, batch 1400, train_loss[loss=3.489, NarTop10Accuracy=0.6348, over 6108.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6381, over 6023.02 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 16:07:52,169 INFO [trainer.py:765] (7/8) Epoch 8, batch 1500, train_loss[loss=3.419, NarTop10Accuracy=0.6395, over 6042.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6407, over 5957.26 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 16:08:19,948 INFO [trainer.py:765] (7/8) Epoch 8, batch 1600, train_loss[loss=3.265, NarTop10Accuracy=0.6752, over 7191.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6406, over 5943.25 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 16:08:46,617 INFO [trainer.py:765] (7/8) Epoch 8, batch 1700, train_loss[loss=3.397, NarTop10Accuracy=0.646, over 6687.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6407, over 5934.78 frames. ], batch size: 14, lr: 1.05e-02 +2024-08-06 16:09:13,106 INFO [trainer.py:765] (7/8) Epoch 8, batch 1800, train_loss[loss=3.262, NarTop10Accuracy=0.6711, over 7164.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6412, over 5994.72 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 16:09:39,636 INFO [trainer.py:765] (7/8) Epoch 8, batch 1900, train_loss[loss=3.828, NarTop10Accuracy=0.5578, over 6165.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6439, over 6023.91 frames. ], batch size: 52, lr: 1.04e-02 +2024-08-06 16:09:56,940 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 16:10:04,970 INFO [trainer.py:811] (7/8) Epoch 8, validation: loss=3.282, NarTop10Accuracy=0.6699, over 1905321.00 frames. +2024-08-06 16:10:04,970 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27199MB +2024-08-06 16:10:05,470 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.814e+02 1.981e+02 2.158e+02 5.862e+02, threshold=3.962e+02, percent-clipped=0.1 +2024-08-06 16:10:13,204 INFO [trainer.py:765] (7/8) Epoch 8, batch 2000, train_loss[loss=3.973, NarTop10Accuracy=0.5303, over 6417.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6411, over 6008.34 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 16:10:38,514 INFO [trainer.py:765] (7/8) Epoch 8, batch 2100, train_loss[loss=3.22, NarTop10Accuracy=0.6813, over 3981.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6435, over 5992.54 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 16:11:03,747 INFO [trainer.py:765] (7/8) Epoch 8, batch 2200, train_loss[loss=3.677, NarTop10Accuracy=0.5856, over 7245.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6407, over 6029.51 frames. ], batch size: 31, lr: 1.04e-02 +2024-08-06 16:11:28,904 INFO [trainer.py:765] (7/8) Epoch 8, batch 2300, train_loss[loss=3.646, NarTop10Accuracy=0.5847, over 5763.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6364, over 6046.38 frames. 
], batch size: 9, lr: 1.03e-02 +2024-08-06 16:11:53,093 INFO [trainer.py:765] (7/8) Epoch 8, batch 2400, train_loss[loss=3.49, NarTop10Accuracy=0.6324, over 5685.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6408, over 5781.81 frames. ], batch size: 8, lr: 1.03e-02 +2024-08-06 16:12:16,444 INFO [trainer.py:765] (7/8) Epoch 8, batch 2500, train_loss[loss=3.344, NarTop10Accuracy=0.6568, over 5835.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6414, over 5485.87 frames. ], batch size: 8, lr: 1.03e-02 +2024-08-06 16:12:36,280 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 16:13:37,515 INFO [trainer.py:765] (7/8) Epoch 9, batch 100, train_loss[loss=3.239, NarTop10Accuracy=0.6811, over 7458.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.651, over 2376.84 frames. ], batch size: 31, lr: 9.72e-03 +2024-08-06 16:14:14,441 INFO [trainer.py:765] (7/8) Epoch 9, batch 200, train_loss[loss=3.636, NarTop10Accuracy=0.5947, over 6837.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6531, over 3861.61 frames. ], batch size: 17, lr: 9.70e-03 +2024-08-06 16:14:44,508 INFO [trainer.py:765] (7/8) Epoch 9, batch 300, train_loss[loss=3.345, NarTop10Accuracy=0.6562, over 7377.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.652, over 4655.25 frames. ], batch size: 23, lr: 9.68e-03 +2024-08-06 16:15:14,915 INFO [trainer.py:765] (7/8) Epoch 9, batch 400, train_loss[loss=3.263, NarTop10Accuracy=0.6774, over 5031.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6538, over 5121.21 frames. ], batch size: 7, lr: 9.65e-03 +2024-08-06 16:15:50,336 INFO [trainer.py:765] (7/8) Epoch 9, batch 500, train_loss[loss=3.282, NarTop10Accuracy=0.6709, over 6111.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6559, over 5398.52 frames. ], batch size: 11, lr: 9.63e-03 +2024-08-06 16:16:23,973 INFO [trainer.py:765] (7/8) Epoch 9, batch 600, train_loss[loss=3.525, NarTop10Accuracy=0.6237, over 5823.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6586, over 5653.29 frames. ], batch size: 9, lr: 9.61e-03 +2024-08-06 16:16:57,145 INFO [trainer.py:765] (7/8) Epoch 9, batch 700, train_loss[loss=3.192, NarTop10Accuracy=0.696, over 5064.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6549, over 5737.59 frames. ], batch size: 6, lr: 9.59e-03 +2024-08-06 16:17:32,052 INFO [trainer.py:765] (7/8) Epoch 9, batch 800, train_loss[loss=3.182, NarTop10Accuracy=0.6908, over 5043.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6479, over 5777.11 frames. ], batch size: 6, lr: 9.57e-03 +2024-08-06 16:18:07,816 INFO [trainer.py:765] (7/8) Epoch 9, batch 900, train_loss[loss=3.245, NarTop10Accuracy=0.6838, over 6180.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6497, over 5793.31 frames. ], batch size: 13, lr: 9.55e-03 +2024-08-06 16:18:39,345 INFO [trainer.py:765] (7/8) Epoch 9, batch 1000, train_loss[loss=3.157, NarTop10Accuracy=0.6921, over 6189.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6472, over 5900.64 frames. ], batch size: 13, lr: 9.53e-03 +2024-08-06 16:19:15,383 INFO [trainer.py:765] (7/8) Epoch 9, batch 1100, train_loss[loss=3.523, NarTop10Accuracy=0.6253, over 6774.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6457, over 5934.99 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 16:19:53,878 INFO [trainer.py:765] (7/8) Epoch 9, batch 1200, train_loss[loss=3.721, NarTop10Accuracy=0.5681, over 7305.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6447, over 5936.73 frames. 
], batch size: 31, lr: 9.48e-03 +2024-08-06 16:20:24,907 INFO [trainer.py:765] (7/8) Epoch 9, batch 1300, train_loss[loss=3.217, NarTop10Accuracy=0.6887, over 5025.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6467, over 5995.91 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 16:20:56,580 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 16:21:04,483 INFO [trainer.py:811] (7/8) Epoch 9, validation: loss=3.266, NarTop10Accuracy=0.6725, over 1905321.00 frames. +2024-08-06 16:21:04,484 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27199MB +2024-08-06 16:21:05,035 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 1.808e+02 1.967e+02 2.142e+02 6.126e+02, threshold=3.935e+02, percent-clipped=0.5 +2024-08-06 16:21:06,691 INFO [trainer.py:765] (7/8) Epoch 9, batch 1400, train_loss[loss=3.586, NarTop10Accuracy=0.6057, over 6054.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6455, over 6019.07 frames. ], batch size: 11, lr: 9.44e-03 +2024-08-06 16:21:38,895 INFO [trainer.py:765] (7/8) Epoch 9, batch 1500, train_loss[loss=3.465, NarTop10Accuracy=0.6355, over 6168.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6507, over 5952.91 frames. ], batch size: 51, lr: 9.42e-03 +2024-08-06 16:22:06,721 INFO [trainer.py:765] (7/8) Epoch 9, batch 1600, train_loss[loss=3.377, NarTop10Accuracy=0.6561, over 7059.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6518, over 5926.83 frames. ], batch size: 22, lr: 9.40e-03 +2024-08-06 16:22:33,470 INFO [trainer.py:765] (7/8) Epoch 9, batch 1700, train_loss[loss=3.361, NarTop10Accuracy=0.6427, over 6234.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6484, over 5912.39 frames. ], batch size: 13, lr: 9.38e-03 +2024-08-06 16:23:00,064 INFO [trainer.py:765] (7/8) Epoch 9, batch 1800, train_loss[loss=3.156, NarTop10Accuracy=0.6969, over 7188.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6508, over 5983.20 frames. ], batch size: 22, lr: 9.36e-03 +2024-08-06 16:23:26,783 INFO [trainer.py:765] (7/8) Epoch 9, batch 1900, train_loss[loss=3.366, NarTop10Accuracy=0.6545, over 6051.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6479, over 6035.25 frames. ], batch size: 51, lr: 9.34e-03 +2024-08-06 16:23:52,486 INFO [trainer.py:765] (7/8) Epoch 9, batch 2000, train_loss[loss=4.064, NarTop10Accuracy=0.5093, over 5700.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6498, over 5993.98 frames. ], batch size: 50, lr: 9.32e-03 +2024-08-06 16:24:17,963 INFO [trainer.py:765] (7/8) Epoch 9, batch 2100, train_loss[loss=3.097, NarTop10Accuracy=0.7099, over 4731.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6497, over 5983.66 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 16:24:43,421 INFO [trainer.py:765] (7/8) Epoch 9, batch 2200, train_loss[loss=3.636, NarTop10Accuracy=0.5877, over 7536.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6487, over 6014.92 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 16:25:08,721 INFO [trainer.py:765] (7/8) Epoch 9, batch 2300, train_loss[loss=3.168, NarTop10Accuracy=0.6901, over 5742.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.645, over 6030.66 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 16:25:33,164 INFO [trainer.py:765] (7/8) Epoch 9, batch 2400, train_loss[loss=3.129, NarTop10Accuracy=0.6907, over 5238.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6457, over 5781.75 frames. 
], batch size: 7, lr: 9.25e-03 +2024-08-06 16:25:56,768 INFO [trainer.py:765] (7/8) Epoch 9, batch 2500, train_loss[loss=3.19, NarTop10Accuracy=0.6846, over 5136.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6535, over 5484.32 frames. ], batch size: 7, lr: 9.23e-03 +2024-08-06 16:26:16,487 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 16:27:19,584 INFO [trainer.py:765] (7/8) Epoch 10, batch 100, train_loss[loss=3.211, NarTop10Accuracy=0.6819, over 7380.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6518, over 2372.49 frames. ], batch size: 31, lr: 8.76e-03 +2024-08-06 16:27:52,628 INFO [trainer.py:765] (7/8) Epoch 10, batch 200, train_loss[loss=3.208, NarTop10Accuracy=0.6822, over 6816.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6558, over 3859.47 frames. ], batch size: 17, lr: 8.74e-03 +2024-08-06 16:28:23,057 INFO [trainer.py:765] (7/8) Epoch 10, batch 300, train_loss[loss=3.135, NarTop10Accuracy=0.7037, over 7206.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6553, over 4670.29 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 16:28:59,200 INFO [trainer.py:765] (7/8) Epoch 10, batch 400, train_loss[loss=3.361, NarTop10Accuracy=0.6503, over 5154.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6566, over 5124.41 frames. ], batch size: 7, lr: 8.71e-03 +2024-08-06 16:29:29,218 INFO [trainer.py:765] (7/8) Epoch 10, batch 500, train_loss[loss=3.073, NarTop10Accuracy=0.7111, over 6099.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6579, over 5387.69 frames. ], batch size: 11, lr: 8.69e-03 +2024-08-06 16:30:02,765 INFO [trainer.py:765] (7/8) Epoch 10, batch 600, train_loss[loss=3.745, NarTop10Accuracy=0.579, over 6177.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6553, over 5657.94 frames. ], batch size: 10, lr: 8.67e-03 +2024-08-06 16:30:34,265 INFO [trainer.py:765] (7/8) Epoch 10, batch 700, train_loss[loss=3.29, NarTop10Accuracy=0.668, over 5070.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6548, over 5733.93 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 16:31:09,843 INFO [trainer.py:765] (7/8) Epoch 10, batch 800, train_loss[loss=3.468, NarTop10Accuracy=0.6264, over 4947.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6547, over 5781.97 frames. ], batch size: 6, lr: 8.64e-03 +2024-08-06 16:31:16,259 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 16:31:24,565 INFO [trainer.py:811] (7/8) Epoch 10, validation: loss=3.184, NarTop10Accuracy=0.6898, over 1905321.00 frames. +2024-08-06 16:31:24,566 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27199MB +2024-08-06 16:31:25,154 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 1.851e+02 2.012e+02 2.196e+02 4.599e+02, threshold=4.024e+02, percent-clipped=0.1 +2024-08-06 16:31:50,345 INFO [trainer.py:765] (7/8) Epoch 10, batch 900, train_loss[loss=3.123, NarTop10Accuracy=0.7016, over 6675.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.66, over 5788.55 frames. ], batch size: 14, lr: 8.62e-03 +2024-08-06 16:32:28,589 INFO [trainer.py:765] (7/8) Epoch 10, batch 1000, train_loss[loss=3.073, NarTop10Accuracy=0.7185, over 6084.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6593, over 5887.93 frames. ], batch size: 13, lr: 8.60e-03 +2024-08-06 16:33:06,376 INFO [trainer.py:765] (7/8) Epoch 10, batch 1100, train_loss[loss=3.249, NarTop10Accuracy=0.6779, over 6747.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6571, over 5934.78 frames. 
], batch size: 17, lr: 8.59e-03 +2024-08-06 16:33:40,960 INFO [trainer.py:765] (7/8) Epoch 10, batch 1200, train_loss[loss=3.251, NarTop10Accuracy=0.6791, over 7380.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6585, over 5922.22 frames. ], batch size: 32, lr: 8.57e-03 +2024-08-06 16:34:16,169 INFO [trainer.py:765] (7/8) Epoch 10, batch 1300, train_loss[loss=3.226, NarTop10Accuracy=0.6672, over 5073.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6583, over 5988.57 frames. ], batch size: 6, lr: 8.55e-03 +2024-08-06 16:34:51,200 INFO [trainer.py:765] (7/8) Epoch 10, batch 1400, train_loss[loss=3.277, NarTop10Accuracy=0.6705, over 6009.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6529, over 6032.47 frames. ], batch size: 11, lr: 8.54e-03 +2024-08-06 16:35:22,159 INFO [trainer.py:765] (7/8) Epoch 10, batch 1500, train_loss[loss=3.533, NarTop10Accuracy=0.6209, over 5982.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.657, over 5981.35 frames. ], batch size: 50, lr: 8.52e-03 +2024-08-06 16:35:50,136 INFO [trainer.py:765] (7/8) Epoch 10, batch 1600, train_loss[loss=3.594, NarTop10Accuracy=0.6061, over 6843.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.659, over 5937.98 frames. ], batch size: 22, lr: 8.50e-03 +2024-08-06 16:36:16,976 INFO [trainer.py:765] (7/8) Epoch 10, batch 1700, train_loss[loss=3.437, NarTop10Accuracy=0.639, over 6669.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6576, over 5918.51 frames. ], batch size: 14, lr: 8.49e-03 +2024-08-06 16:36:43,647 INFO [trainer.py:765] (7/8) Epoch 10, batch 1800, train_loss[loss=3.201, NarTop10Accuracy=0.6853, over 7203.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6591, over 5976.40 frames. ], batch size: 22, lr: 8.47e-03 +2024-08-06 16:37:10,290 INFO [trainer.py:765] (7/8) Epoch 10, batch 1900, train_loss[loss=3.235, NarTop10Accuracy=0.6846, over 5637.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6599, over 6014.91 frames. ], batch size: 50, lr: 8.45e-03 +2024-08-06 16:37:36,089 INFO [trainer.py:765] (7/8) Epoch 10, batch 2000, train_loss[loss=3.284, NarTop10Accuracy=0.6733, over 6285.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6615, over 5992.28 frames. ], batch size: 51, lr: 8.44e-03 +2024-08-06 16:38:01,650 INFO [trainer.py:765] (7/8) Epoch 10, batch 2100, train_loss[loss=3.489, NarTop10Accuracy=0.6248, over 3840.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6577, over 5956.81 frames. ], batch size: 4, lr: 8.42e-03 +2024-08-06 16:38:27,120 INFO [trainer.py:765] (7/8) Epoch 10, batch 2200, train_loss[loss=3.735, NarTop10Accuracy=0.5738, over 7287.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6566, over 6010.40 frames. ], batch size: 32, lr: 8.41e-03 +2024-08-06 16:38:52,447 INFO [trainer.py:765] (7/8) Epoch 10, batch 2300, train_loss[loss=3.167, NarTop10Accuracy=0.6926, over 5685.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6549, over 6012.99 frames. ], batch size: 9, lr: 8.39e-03 +2024-08-06 16:39:17,005 INFO [trainer.py:765] (7/8) Epoch 10, batch 2400, train_loss[loss=3.197, NarTop10Accuracy=0.6885, over 5001.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6608, over 5762.30 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 16:39:40,801 INFO [trainer.py:765] (7/8) Epoch 10, batch 2500, train_loss[loss=3.414, NarTop10Accuracy=0.6369, over 5205.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.666, over 5462.89 frames. ], batch size: 7, lr: 8.36e-03 +2024-08-06 16:40:00,497 INFO [trainer.py:650] (7/8) Reaches end of dataloader. 
+2024-08-06 16:41:06,235 INFO [trainer.py:765] (7/8) Epoch 11, batch 100, train_loss[loss=3.553, NarTop10Accuracy=0.6116, over 7314.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6568, over 2364.75 frames. ], batch size: 32, lr: 7.97e-03 +2024-08-06 16:41:39,021 INFO [trainer.py:765] (7/8) Epoch 11, batch 200, train_loss[loss=3.661, NarTop10Accuracy=0.5858, over 6744.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6604, over 3852.44 frames. ], batch size: 17, lr: 7.95e-03 +2024-08-06 16:41:53,190 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 16:42:01,355 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=3.116, NarTop10Accuracy=0.7034, over 1905321.00 frames. +2024-08-06 16:42:01,356 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27199MB +2024-08-06 16:42:01,879 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 1.889e+02 2.046e+02 2.249e+02 5.417e+02, threshold=4.093e+02, percent-clipped=0.2 +2024-08-06 16:42:17,976 INFO [trainer.py:765] (7/8) Epoch 11, batch 300, train_loss[loss=3.121, NarTop10Accuracy=0.706, over 7002.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6653, over 4653.89 frames. ], batch size: 22, lr: 7.94e-03 +2024-08-06 16:42:55,155 INFO [trainer.py:765] (7/8) Epoch 11, batch 400, train_loss[loss=3.216, NarTop10Accuracy=0.6812, over 5148.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6679, over 5115.93 frames. ], batch size: 7, lr: 7.92e-03 +2024-08-06 16:43:25,719 INFO [trainer.py:765] (7/8) Epoch 11, batch 500, train_loss[loss=3.151, NarTop10Accuracy=0.6939, over 6183.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6688, over 5383.26 frames. ], batch size: 11, lr: 7.91e-03 +2024-08-06 16:44:02,242 INFO [trainer.py:765] (7/8) Epoch 11, batch 600, train_loss[loss=3.457, NarTop10Accuracy=0.6283, over 5781.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6663, over 5644.32 frames. ], batch size: 9, lr: 7.89e-03 +2024-08-06 16:44:35,716 INFO [trainer.py:765] (7/8) Epoch 11, batch 700, train_loss[loss=3.674, NarTop10Accuracy=0.5859, over 5088.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6674, over 5735.03 frames. ], batch size: 6, lr: 7.88e-03 +2024-08-06 16:45:10,468 INFO [trainer.py:765] (7/8) Epoch 11, batch 800, train_loss[loss=3.085, NarTop10Accuracy=0.716, over 5157.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6638, over 5781.34 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 16:45:46,457 INFO [trainer.py:765] (7/8) Epoch 11, batch 900, train_loss[loss=3.662, NarTop10Accuracy=0.5889, over 6234.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6648, over 5799.19 frames. ], batch size: 13, lr: 7.85e-03 +2024-08-06 16:46:20,310 INFO [trainer.py:765] (7/8) Epoch 11, batch 1000, train_loss[loss=3.449, NarTop10Accuracy=0.641, over 6558.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.666, over 5898.44 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 16:46:53,457 INFO [trainer.py:765] (7/8) Epoch 11, batch 1100, train_loss[loss=3.105, NarTop10Accuracy=0.7106, over 6864.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6671, over 5926.50 frames. ], batch size: 17, lr: 7.82e-03 +2024-08-06 16:47:33,030 INFO [trainer.py:765] (7/8) Epoch 11, batch 1200, train_loss[loss=3.441, NarTop10Accuracy=0.6334, over 7488.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6652, over 5923.43 frames. 
], batch size: 32, lr: 7.81e-03 +2024-08-06 16:48:06,481 INFO [trainer.py:765] (7/8) Epoch 11, batch 1300, train_loss[loss=2.987, NarTop10Accuracy=0.7366, over 4923.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6631, over 6000.43 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 16:48:41,353 INFO [trainer.py:765] (7/8) Epoch 11, batch 1400, train_loss[loss=3.39, NarTop10Accuracy=0.6501, over 6093.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6594, over 6033.30 frames. ], batch size: 11, lr: 7.78e-03 +2024-08-06 16:49:09,345 INFO [trainer.py:765] (7/8) Epoch 11, batch 1500, train_loss[loss=3.379, NarTop10Accuracy=0.6524, over 6213.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6596, over 5976.77 frames. ], batch size: 51, lr: 7.77e-03 +2024-08-06 16:49:37,103 INFO [trainer.py:765] (7/8) Epoch 11, batch 1600, train_loss[loss=3.298, NarTop10Accuracy=0.6732, over 7098.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6645, over 5958.49 frames. ], batch size: 22, lr: 7.75e-03 +2024-08-06 16:50:03,792 INFO [trainer.py:765] (7/8) Epoch 11, batch 1700, train_loss[loss=3.397, NarTop10Accuracy=0.642, over 6270.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6658, over 5952.14 frames. ], batch size: 13, lr: 7.74e-03 +2024-08-06 16:50:30,353 INFO [trainer.py:765] (7/8) Epoch 11, batch 1800, train_loss[loss=3.461, NarTop10Accuracy=0.6331, over 7023.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6622, over 5999.77 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 16:50:56,821 INFO [trainer.py:765] (7/8) Epoch 11, batch 1900, train_loss[loss=3.864, NarTop10Accuracy=0.5501, over 6450.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6603, over 6043.75 frames. ], batch size: 50, lr: 7.71e-03 +2024-08-06 16:51:22,405 INFO [trainer.py:765] (7/8) Epoch 11, batch 2000, train_loss[loss=3.761, NarTop10Accuracy=0.5667, over 5775.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6613, over 6008.73 frames. ], batch size: 51, lr: 7.70e-03 +2024-08-06 16:51:47,794 INFO [trainer.py:765] (7/8) Epoch 11, batch 2100, train_loss[loss=2.998, NarTop10Accuracy=0.7311, over 3957.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.662, over 5985.18 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 16:52:13,118 INFO [trainer.py:765] (7/8) Epoch 11, batch 2200, train_loss[loss=3.282, NarTop10Accuracy=0.6728, over 7176.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6632, over 6012.87 frames. ], batch size: 31, lr: 7.67e-03 +2024-08-06 16:52:23,899 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 16:52:32,079 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=3.101, NarTop10Accuracy=0.7058, over 1905321.00 frames. +2024-08-06 16:52:32,079 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27199MB +2024-08-06 16:52:32,593 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.920e+02 2.088e+02 2.244e+02 3.599e+02, threshold=4.177e+02, percent-clipped=0.0 +2024-08-06 16:52:46,445 INFO [trainer.py:765] (7/8) Epoch 11, batch 2300, train_loss[loss=3.08, NarTop10Accuracy=0.7088, over 5616.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6614, over 6019.89 frames. ], batch size: 9, lr: 7.66e-03 +2024-08-06 16:53:10,887 INFO [trainer.py:765] (7/8) Epoch 11, batch 2400, train_loss[loss=3.495, NarTop10Accuracy=0.6381, over 4938.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6636, over 5776.81 frames. 
], batch size: 7, lr: 7.64e-03 +2024-08-06 16:53:34,371 INFO [trainer.py:765] (7/8) Epoch 11, batch 2500, train_loss[loss=3.376, NarTop10Accuracy=0.6471, over 5208.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6652, over 5473.51 frames. ], batch size: 7, lr: 7.63e-03 +2024-08-06 16:53:54,308 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 16:54:58,525 INFO [trainer.py:765] (7/8) Epoch 12, batch 100, train_loss[loss=3.635, NarTop10Accuracy=0.5915, over 7158.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6671, over 2366.19 frames. ], batch size: 31, lr: 7.30e-03 +2024-08-06 16:55:32,432 INFO [trainer.py:765] (7/8) Epoch 12, batch 200, train_loss[loss=3.072, NarTop10Accuracy=0.7164, over 6777.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6735, over 3860.68 frames. ], batch size: 17, lr: 7.29e-03 +2024-08-06 16:56:05,096 INFO [trainer.py:765] (7/8) Epoch 12, batch 300, train_loss[loss=2.96, NarTop10Accuracy=0.7337, over 7032.00 frames. ], tot_loss[loss=3.245, NarTop10Accuracy=0.6774, over 4672.13 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 16:56:36,426 INFO [trainer.py:765] (7/8) Epoch 12, batch 400, train_loss[loss=3.033, NarTop10Accuracy=0.7186, over 5076.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.6747, over 5122.57 frames. ], batch size: 7, lr: 7.26e-03 +2024-08-06 16:57:10,503 INFO [trainer.py:765] (7/8) Epoch 12, batch 500, train_loss[loss=3.601, NarTop10Accuracy=0.6007, over 6045.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.673, over 5397.49 frames. ], batch size: 11, lr: 7.25e-03 +2024-08-06 16:57:45,483 INFO [trainer.py:765] (7/8) Epoch 12, batch 600, train_loss[loss=2.97, NarTop10Accuracy=0.7433, over 5835.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6711, over 5672.39 frames. ], batch size: 9, lr: 7.24e-03 +2024-08-06 16:58:17,005 INFO [trainer.py:765] (7/8) Epoch 12, batch 700, train_loss[loss=3.459, NarTop10Accuracy=0.6249, over 4260.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6672, over 5718.22 frames. ], batch size: 5, lr: 7.22e-03 +2024-08-06 16:58:53,469 INFO [trainer.py:765] (7/8) Epoch 12, batch 800, train_loss[loss=3.448, NarTop10Accuracy=0.6243, over 5058.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6676, over 5782.27 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 16:59:27,206 INFO [trainer.py:765] (7/8) Epoch 12, batch 900, train_loss[loss=3.065, NarTop10Accuracy=0.7136, over 6714.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6716, over 5794.74 frames. ], batch size: 14, lr: 7.20e-03 +2024-08-06 17:00:01,574 INFO [trainer.py:765] (7/8) Epoch 12, batch 1000, train_loss[loss=2.974, NarTop10Accuracy=0.7333, over 6390.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6693, over 5903.95 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 17:00:39,189 INFO [trainer.py:765] (7/8) Epoch 12, batch 1100, train_loss[loss=3.466, NarTop10Accuracy=0.6206, over 6789.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6658, over 5947.51 frames. ], batch size: 17, lr: 7.18e-03 +2024-08-06 17:01:13,964 INFO [trainer.py:765] (7/8) Epoch 12, batch 1200, train_loss[loss=3.019, NarTop10Accuracy=0.722, over 7221.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6726, over 5938.39 frames. ], batch size: 31, lr: 7.17e-03 +2024-08-06 17:01:48,108 INFO [trainer.py:765] (7/8) Epoch 12, batch 1300, train_loss[loss=3.281, NarTop10Accuracy=0.6677, over 5220.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6693, over 6000.94 frames. 
], batch size: 6, lr: 7.15e-03 +2024-08-06 17:02:22,323 INFO [trainer.py:765] (7/8) Epoch 12, batch 1400, train_loss[loss=3.637, NarTop10Accuracy=0.598, over 5994.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6672, over 6017.36 frames. ], batch size: 11, lr: 7.14e-03 +2024-08-06 17:02:52,877 INFO [trainer.py:765] (7/8) Epoch 12, batch 1500, train_loss[loss=3.369, NarTop10Accuracy=0.6554, over 6189.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6715, over 5949.24 frames. ], batch size: 50, lr: 7.13e-03 +2024-08-06 17:03:20,691 INFO [trainer.py:765] (7/8) Epoch 12, batch 1600, train_loss[loss=3.294, NarTop10Accuracy=0.6688, over 7122.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6698, over 5921.89 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 17:03:38,297 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:03:46,474 INFO [trainer.py:811] (7/8) Epoch 12, validation: loss=3.054, NarTop10Accuracy=0.7153, over 1905321.00 frames. +2024-08-06 17:03:46,474 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:03:46,988 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 1.899e+02 2.078e+02 2.276e+02 5.455e+02, threshold=4.157e+02, percent-clipped=0.1 +2024-08-06 17:03:55,604 INFO [trainer.py:765] (7/8) Epoch 12, batch 1700, train_loss[loss=3.458, NarTop10Accuracy=0.636, over 6366.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6678, over 5913.28 frames. ], batch size: 13, lr: 7.11e-03 +2024-08-06 17:04:22,121 INFO [trainer.py:765] (7/8) Epoch 12, batch 1800, train_loss[loss=3.477, NarTop10Accuracy=0.6241, over 6930.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6684, over 5987.09 frames. ], batch size: 22, lr: 7.10e-03 +2024-08-06 17:04:48,592 INFO [trainer.py:765] (7/8) Epoch 12, batch 1900, train_loss[loss=3.285, NarTop10Accuracy=0.671, over 6378.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6689, over 6029.56 frames. ], batch size: 50, lr: 7.08e-03 +2024-08-06 17:05:14,198 INFO [trainer.py:765] (7/8) Epoch 12, batch 2000, train_loss[loss=3.55, NarTop10Accuracy=0.6185, over 6393.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6717, over 6013.24 frames. ], batch size: 51, lr: 7.07e-03 +2024-08-06 17:05:39,468 INFO [trainer.py:765] (7/8) Epoch 12, batch 2100, train_loss[loss=3.401, NarTop10Accuracy=0.6441, over 3996.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6705, over 5987.38 frames. ], batch size: 4, lr: 7.06e-03 +2024-08-06 17:06:04,691 INFO [trainer.py:765] (7/8) Epoch 12, batch 2200, train_loss[loss=3.285, NarTop10Accuracy=0.6611, over 7305.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6674, over 6018.15 frames. ], batch size: 31, lr: 7.05e-03 +2024-08-06 17:06:29,847 INFO [trainer.py:765] (7/8) Epoch 12, batch 2300, train_loss[loss=3.483, NarTop10Accuracy=0.6321, over 5556.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6685, over 6029.72 frames. ], batch size: 9, lr: 7.04e-03 +2024-08-06 17:06:54,200 INFO [trainer.py:765] (7/8) Epoch 12, batch 2400, train_loss[loss=3.113, NarTop10Accuracy=0.7006, over 5715.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6692, over 5774.13 frames. ], batch size: 8, lr: 7.03e-03 +2024-08-06 17:07:17,646 INFO [trainer.py:765] (7/8) Epoch 12, batch 2500, train_loss[loss=3.24, NarTop10Accuracy=0.6739, over 5052.00 frames. ], tot_loss[loss=3.256, NarTop10Accuracy=0.674, over 5490.55 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 17:07:37,539 INFO [trainer.py:650] (7/8) Reaches end of dataloader. 
+2024-08-06 17:08:40,079 INFO [trainer.py:765] (7/8) Epoch 13, batch 100, train_loss[loss=2.92, NarTop10Accuracy=0.7456, over 7341.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6714, over 2372.50 frames. ], batch size: 31, lr: 6.73e-03 +2024-08-06 17:09:14,120 INFO [trainer.py:765] (7/8) Epoch 13, batch 200, train_loss[loss=3.087, NarTop10Accuracy=0.711, over 6774.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.669, over 3870.65 frames. ], batch size: 17, lr: 6.72e-03 +2024-08-06 17:09:46,276 INFO [trainer.py:765] (7/8) Epoch 13, batch 300, train_loss[loss=3.416, NarTop10Accuracy=0.6481, over 6978.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6725, over 4662.38 frames. ], batch size: 22, lr: 6.71e-03 +2024-08-06 17:10:19,164 INFO [trainer.py:765] (7/8) Epoch 13, batch 400, train_loss[loss=3.017, NarTop10Accuracy=0.7132, over 5160.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6751, over 5114.05 frames. ], batch size: 7, lr: 6.70e-03 +2024-08-06 17:10:49,335 INFO [trainer.py:765] (7/8) Epoch 13, batch 500, train_loss[loss=3.269, NarTop10Accuracy=0.6763, over 6009.00 frames. ], tot_loss[loss=3.249, NarTop10Accuracy=0.6762, over 5401.77 frames. ], batch size: 11, lr: 6.69e-03 +2024-08-06 17:11:26,244 INFO [trainer.py:765] (7/8) Epoch 13, batch 600, train_loss[loss=2.943, NarTop10Accuracy=0.7319, over 5577.00 frames. ], tot_loss[loss=3.24, NarTop10Accuracy=0.6781, over 5648.90 frames. ], batch size: 9, lr: 6.68e-03 +2024-08-06 17:11:57,381 INFO [trainer.py:765] (7/8) Epoch 13, batch 700, train_loss[loss=3.064, NarTop10Accuracy=0.7112, over 4989.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6777, over 5741.55 frames. ], batch size: 6, lr: 6.67e-03 +2024-08-06 17:12:33,442 INFO [trainer.py:765] (7/8) Epoch 13, batch 800, train_loss[loss=2.923, NarTop10Accuracy=0.737, over 4239.00 frames. ], tot_loss[loss=3.244, NarTop10Accuracy=0.6773, over 5770.80 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 17:13:10,032 INFO [trainer.py:765] (7/8) Epoch 13, batch 900, train_loss[loss=3.104, NarTop10Accuracy=0.7044, over 6237.00 frames. ], tot_loss[loss=3.243, NarTop10Accuracy=0.6776, over 5784.78 frames. ], batch size: 13, lr: 6.65e-03 +2024-08-06 17:13:41,443 INFO [trainer.py:765] (7/8) Epoch 13, batch 1000, train_loss[loss=3.461, NarTop10Accuracy=0.6333, over 6201.00 frames. ], tot_loss[loss=3.238, NarTop10Accuracy=0.6785, over 5886.20 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 17:14:15,537 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:14:23,644 INFO [trainer.py:811] (7/8) Epoch 13, validation: loss=3.099, NarTop10Accuracy=0.7062, over 1905321.00 frames. +2024-08-06 17:14:23,645 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:14:24,470 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.948e+02 2.091e+02 2.295e+02 3.353e+02, threshold=4.181e+02, percent-clipped=0.0 +2024-08-06 17:14:26,697 INFO [trainer.py:765] (7/8) Epoch 13, batch 1100, train_loss[loss=3.337, NarTop10Accuracy=0.6504, over 6927.00 frames. ], tot_loss[loss=3.25, NarTop10Accuracy=0.6757, over 5925.71 frames. ], batch size: 17, lr: 6.63e-03 +2024-08-06 17:15:03,475 INFO [trainer.py:765] (7/8) Epoch 13, batch 1200, train_loss[loss=3.519, NarTop10Accuracy=0.622, over 7245.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.6737, over 5923.87 frames. 
], batch size: 32, lr: 6.62e-03 +2024-08-06 17:15:35,514 INFO [trainer.py:765] (7/8) Epoch 13, batch 1300, train_loss[loss=3.116, NarTop10Accuracy=0.7062, over 4341.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.6737, over 5997.87 frames. ], batch size: 5, lr: 6.61e-03 +2024-08-06 17:16:11,782 INFO [trainer.py:765] (7/8) Epoch 13, batch 1400, train_loss[loss=3.041, NarTop10Accuracy=0.7241, over 6099.00 frames. ], tot_loss[loss=3.261, NarTop10Accuracy=0.6731, over 6033.19 frames. ], batch size: 11, lr: 6.60e-03 +2024-08-06 17:16:39,787 INFO [trainer.py:765] (7/8) Epoch 13, batch 1500, train_loss[loss=3.532, NarTop10Accuracy=0.6228, over 6204.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.6743, over 5949.87 frames. ], batch size: 50, lr: 6.59e-03 +2024-08-06 17:17:07,603 INFO [trainer.py:765] (7/8) Epoch 13, batch 1600, train_loss[loss=3.018, NarTop10Accuracy=0.7294, over 6993.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6732, over 5936.19 frames. ], batch size: 22, lr: 6.58e-03 +2024-08-06 17:17:34,259 INFO [trainer.py:765] (7/8) Epoch 13, batch 1700, train_loss[loss=3.185, NarTop10Accuracy=0.6838, over 6213.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6727, over 5929.54 frames. ], batch size: 13, lr: 6.57e-03 +2024-08-06 17:18:00,762 INFO [trainer.py:765] (7/8) Epoch 13, batch 1800, train_loss[loss=3.133, NarTop10Accuracy=0.6959, over 7023.00 frames. ], tot_loss[loss=3.259, NarTop10Accuracy=0.6738, over 5984.90 frames. ], batch size: 22, lr: 6.56e-03 +2024-08-06 17:18:27,244 INFO [trainer.py:765] (7/8) Epoch 13, batch 1900, train_loss[loss=3.534, NarTop10Accuracy=0.6208, over 5505.00 frames. ], tot_loss[loss=3.253, NarTop10Accuracy=0.6754, over 6020.31 frames. ], batch size: 50, lr: 6.55e-03 +2024-08-06 17:18:52,777 INFO [trainer.py:765] (7/8) Epoch 13, batch 2000, train_loss[loss=3.489, NarTop10Accuracy=0.6213, over 6441.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.679, over 6003.72 frames. ], batch size: 51, lr: 6.54e-03 +2024-08-06 17:19:18,147 INFO [trainer.py:765] (7/8) Epoch 13, batch 2100, train_loss[loss=2.881, NarTop10Accuracy=0.7497, over 4749.00 frames. ], tot_loss[loss=3.236, NarTop10Accuracy=0.6791, over 5981.96 frames. ], batch size: 5, lr: 6.53e-03 +2024-08-06 17:19:43,412 INFO [trainer.py:765] (7/8) Epoch 13, batch 2200, train_loss[loss=3.44, NarTop10Accuracy=0.6386, over 7275.00 frames. ], tot_loss[loss=3.247, NarTop10Accuracy=0.6766, over 6007.00 frames. ], batch size: 31, lr: 6.52e-03 +2024-08-06 17:20:08,543 INFO [trainer.py:765] (7/8) Epoch 13, batch 2300, train_loss[loss=3.577, NarTop10Accuracy=0.6046, over 5844.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6728, over 6008.47 frames. ], batch size: 9, lr: 6.51e-03 +2024-08-06 17:20:32,939 INFO [trainer.py:765] (7/8) Epoch 13, batch 2400, train_loss[loss=3.5, NarTop10Accuracy=0.617, over 5130.00 frames. ], tot_loss[loss=3.242, NarTop10Accuracy=0.6778, over 5776.63 frames. ], batch size: 7, lr: 6.50e-03 +2024-08-06 17:20:56,408 INFO [trainer.py:765] (7/8) Epoch 13, batch 2500, train_loss[loss=3.293, NarTop10Accuracy=0.6562, over 5052.00 frames. ], tot_loss[loss=3.219, NarTop10Accuracy=0.6818, over 5481.09 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 17:21:16,261 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 17:22:19,315 INFO [trainer.py:765] (7/8) Epoch 14, batch 100, train_loss[loss=3.042, NarTop10Accuracy=0.7187, over 7155.00 frames. ], tot_loss[loss=3.224, NarTop10Accuracy=0.6815, over 2381.06 frames. 
], batch size: 31, lr: 6.24e-03 +2024-08-06 17:22:50,378 INFO [trainer.py:765] (7/8) Epoch 14, batch 200, train_loss[loss=3.194, NarTop10Accuracy=0.6836, over 6684.00 frames. ], tot_loss[loss=3.23, NarTop10Accuracy=0.6798, over 3878.97 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 17:23:23,880 INFO [trainer.py:765] (7/8) Epoch 14, batch 300, train_loss[loss=3.056, NarTop10Accuracy=0.7215, over 7251.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6843, over 4664.71 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 17:23:57,484 INFO [trainer.py:765] (7/8) Epoch 14, batch 400, train_loss[loss=3.093, NarTop10Accuracy=0.7168, over 5127.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.6809, over 5111.90 frames. ], batch size: 7, lr: 6.22e-03 +2024-08-06 17:24:32,114 INFO [trainer.py:765] (7/8) Epoch 14, batch 500, train_loss[loss=3.28, NarTop10Accuracy=0.6712, over 6162.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6793, over 5394.90 frames. ], batch size: 11, lr: 6.21e-03 +2024-08-06 17:24:36,213 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:24:44,275 INFO [trainer.py:811] (7/8) Epoch 14, validation: loss=3.004, NarTop10Accuracy=0.726, over 1905321.00 frames. +2024-08-06 17:24:44,275 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:24:44,823 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 1.969e+02 2.114e+02 2.287e+02 4.406e+02, threshold=4.227e+02, percent-clipped=0.1 +2024-08-06 17:25:12,914 INFO [trainer.py:765] (7/8) Epoch 14, batch 600, train_loss[loss=2.987, NarTop10Accuracy=0.7349, over 5790.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.6793, over 5668.66 frames. ], batch size: 9, lr: 6.20e-03 +2024-08-06 17:25:48,548 INFO [trainer.py:765] (7/8) Epoch 14, batch 700, train_loss[loss=3.566, NarTop10Accuracy=0.6106, over 5112.00 frames. ], tot_loss[loss=3.225, NarTop10Accuracy=0.6805, over 5721.61 frames. ], batch size: 6, lr: 6.19e-03 +2024-08-06 17:26:25,280 INFO [trainer.py:765] (7/8) Epoch 14, batch 800, train_loss[loss=2.951, NarTop10Accuracy=0.7398, over 4350.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6827, over 5794.41 frames. ], batch size: 5, lr: 6.18e-03 +2024-08-06 17:26:57,660 INFO [trainer.py:765] (7/8) Epoch 14, batch 900, train_loss[loss=3.343, NarTop10Accuracy=0.6565, over 6276.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6842, over 5794.85 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 17:27:31,717 INFO [trainer.py:765] (7/8) Epoch 14, batch 1000, train_loss[loss=3.407, NarTop10Accuracy=0.6445, over 6327.00 frames. ], tot_loss[loss=3.223, NarTop10Accuracy=0.6814, over 5897.79 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 17:28:11,597 INFO [trainer.py:765] (7/8) Epoch 14, batch 1100, train_loss[loss=2.878, NarTop10Accuracy=0.7518, over 6840.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6825, over 5960.40 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 17:28:40,734 INFO [trainer.py:765] (7/8) Epoch 14, batch 1200, train_loss[loss=3.501, NarTop10Accuracy=0.6246, over 6891.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6831, over 5941.08 frames. ], batch size: 31, lr: 6.15e-03 +2024-08-06 17:29:16,215 INFO [trainer.py:765] (7/8) Epoch 14, batch 1300, train_loss[loss=3.529, NarTop10Accuracy=0.6139, over 5082.00 frames. ], tot_loss[loss=3.218, NarTop10Accuracy=0.6823, over 5988.05 frames. 
], batch size: 6, lr: 6.14e-03 +2024-08-06 17:29:54,602 INFO [trainer.py:765] (7/8) Epoch 14, batch 1400, train_loss[loss=3.358, NarTop10Accuracy=0.6442, over 5892.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6803, over 6004.75 frames. ], batch size: 11, lr: 6.13e-03 +2024-08-06 17:30:25,315 INFO [trainer.py:765] (7/8) Epoch 14, batch 1500, train_loss[loss=3.755, NarTop10Accuracy=0.5768, over 5778.00 frames. ], tot_loss[loss=3.233, NarTop10Accuracy=0.6791, over 5957.73 frames. ], batch size: 50, lr: 6.12e-03 +2024-08-06 17:30:53,043 INFO [trainer.py:765] (7/8) Epoch 14, batch 1600, train_loss[loss=2.955, NarTop10Accuracy=0.7351, over 7041.00 frames. ], tot_loss[loss=3.221, NarTop10Accuracy=0.6816, over 5925.61 frames. ], batch size: 22, lr: 6.11e-03 +2024-08-06 17:31:19,728 INFO [trainer.py:765] (7/8) Epoch 14, batch 1700, train_loss[loss=3.06, NarTop10Accuracy=0.7214, over 6318.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6852, over 5899.34 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 17:31:46,290 INFO [trainer.py:765] (7/8) Epoch 14, batch 1800, train_loss[loss=2.97, NarTop10Accuracy=0.729, over 7074.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6893, over 5965.46 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 17:32:12,728 INFO [trainer.py:765] (7/8) Epoch 14, batch 1900, train_loss[loss=3.686, NarTop10Accuracy=0.5871, over 6399.00 frames. ], tot_loss[loss=3.207, NarTop10Accuracy=0.6848, over 6021.86 frames. ], batch size: 50, lr: 6.09e-03 +2024-08-06 17:32:38,283 INFO [trainer.py:765] (7/8) Epoch 14, batch 2000, train_loss[loss=3.252, NarTop10Accuracy=0.6852, over 6189.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6827, over 6003.36 frames. ], batch size: 53, lr: 6.08e-03 +2024-08-06 17:33:03,646 INFO [trainer.py:765] (7/8) Epoch 14, batch 2100, train_loss[loss=2.907, NarTop10Accuracy=0.7468, over 4782.00 frames. ], tot_loss[loss=3.217, NarTop10Accuracy=0.6827, over 5981.26 frames. ], batch size: 5, lr: 6.07e-03 +2024-08-06 17:33:28,999 INFO [trainer.py:765] (7/8) Epoch 14, batch 2200, train_loss[loss=3.312, NarTop10Accuracy=0.6693, over 7242.00 frames. ], tot_loss[loss=3.211, NarTop10Accuracy=0.6838, over 5986.10 frames. ], batch size: 32, lr: 6.06e-03 +2024-08-06 17:33:54,089 INFO [trainer.py:765] (7/8) Epoch 14, batch 2300, train_loss[loss=2.99, NarTop10Accuracy=0.728, over 5679.00 frames. ], tot_loss[loss=3.231, NarTop10Accuracy=0.68, over 5990.52 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 17:34:18,534 INFO [trainer.py:765] (7/8) Epoch 14, batch 2400, train_loss[loss=2.934, NarTop10Accuracy=0.7459, over 5235.00 frames. ], tot_loss[loss=3.232, NarTop10Accuracy=0.6793, over 5753.90 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:42,116 INFO [trainer.py:765] (7/8) Epoch 14, batch 2500, train_loss[loss=2.903, NarTop10Accuracy=0.7442, over 5094.00 frames. ], tot_loss[loss=3.203, NarTop10Accuracy=0.6849, over 5461.85 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 17:34:45,395 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:34:53,209 INFO [trainer.py:811] (7/8) Epoch 14, validation: loss=3.062, NarTop10Accuracy=0.7136, over 1905321.00 frames. +2024-08-06 17:34:53,209 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:34:53,680 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.975e+02 2.132e+02 2.304e+02 3.875e+02, threshold=4.265e+02, percent-clipped=0.0 +2024-08-06 17:35:09,823 INFO [trainer.py:650] (7/8) Reaches end of dataloader. 
+2024-08-06 17:36:11,738 INFO [trainer.py:765] (7/8) Epoch 15, batch 100, train_loss[loss=3.191, NarTop10Accuracy=0.6849, over 7092.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.6806, over 2357.71 frames. ], batch size: 31, lr: 5.82e-03 +2024-08-06 17:36:44,334 INFO [trainer.py:765] (7/8) Epoch 15, batch 200, train_loss[loss=3.41, NarTop10Accuracy=0.6413, over 6759.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6888, over 3846.23 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 17:37:17,714 INFO [trainer.py:765] (7/8) Epoch 15, batch 300, train_loss[loss=3.425, NarTop10Accuracy=0.6377, over 7005.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6878, over 4655.63 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 17:37:48,904 INFO [trainer.py:765] (7/8) Epoch 15, batch 400, train_loss[loss=2.876, NarTop10Accuracy=0.7587, over 5151.00 frames. ], tot_loss[loss=3.191, NarTop10Accuracy=0.6881, over 5107.48 frames. ], batch size: 7, lr: 5.80e-03 +2024-08-06 17:38:22,354 INFO [trainer.py:765] (7/8) Epoch 15, batch 500, train_loss[loss=2.803, NarTop10Accuracy=0.7673, over 6003.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6887, over 5391.20 frames. ], batch size: 11, lr: 5.79e-03 +2024-08-06 17:38:53,094 INFO [trainer.py:765] (7/8) Epoch 15, batch 600, train_loss[loss=2.929, NarTop10Accuracy=0.7416, over 5676.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6858, over 5664.61 frames. ], batch size: 9, lr: 5.78e-03 +2024-08-06 17:39:27,922 INFO [trainer.py:765] (7/8) Epoch 15, batch 700, train_loss[loss=2.947, NarTop10Accuracy=0.7441, over 5106.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.6846, over 5753.05 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 17:40:05,565 INFO [trainer.py:765] (7/8) Epoch 15, batch 800, train_loss[loss=3.322, NarTop10Accuracy=0.6647, over 5187.00 frames. ], tot_loss[loss=3.227, NarTop10Accuracy=0.6807, over 5806.45 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 17:40:35,791 INFO [trainer.py:765] (7/8) Epoch 15, batch 900, train_loss[loss=3.393, NarTop10Accuracy=0.6465, over 6267.00 frames. ], tot_loss[loss=3.206, NarTop10Accuracy=0.6848, over 5807.82 frames. ], batch size: 13, lr: 5.76e-03 +2024-08-06 17:41:11,251 INFO [trainer.py:765] (7/8) Epoch 15, batch 1000, train_loss[loss=3.196, NarTop10Accuracy=0.6862, over 6231.00 frames. ], tot_loss[loss=3.198, NarTop10Accuracy=0.6865, over 5919.13 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 17:41:46,452 INFO [trainer.py:765] (7/8) Epoch 15, batch 1100, train_loss[loss=3.143, NarTop10Accuracy=0.7015, over 6780.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.6868, over 5938.63 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 17:42:19,456 INFO [trainer.py:765] (7/8) Epoch 15, batch 1200, train_loss[loss=3.445, NarTop10Accuracy=0.6353, over 7269.00 frames. ], tot_loss[loss=3.228, NarTop10Accuracy=0.6807, over 5930.30 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 17:42:54,428 INFO [trainer.py:765] (7/8) Epoch 15, batch 1300, train_loss[loss=3, NarTop10Accuracy=0.729, over 5073.00 frames. ], tot_loss[loss=3.213, NarTop10Accuracy=0.6833, over 6003.65 frames. ], batch size: 6, lr: 5.73e-03 +2024-08-06 17:43:26,607 INFO [trainer.py:765] (7/8) Epoch 15, batch 1400, train_loss[loss=3.275, NarTop10Accuracy=0.6678, over 6066.00 frames. ], tot_loss[loss=3.222, NarTop10Accuracy=0.6816, over 6026.75 frames. 
], batch size: 11, lr: 5.72e-03 +2024-08-06 17:43:56,558 INFO [trainer.py:765] (7/8) Epoch 15, batch 1500, train_loss[loss=3.132, NarTop10Accuracy=0.7024, over 5784.00 frames. ], tot_loss[loss=3.229, NarTop10Accuracy=0.6798, over 5952.20 frames. ], batch size: 50, lr: 5.71e-03 +2024-08-06 17:44:24,241 INFO [trainer.py:765] (7/8) Epoch 15, batch 1600, train_loss[loss=3.643, NarTop10Accuracy=0.597, over 6945.00 frames. ], tot_loss[loss=3.205, NarTop10Accuracy=0.6849, over 5940.72 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 17:44:50,856 INFO [trainer.py:765] (7/8) Epoch 15, batch 1700, train_loss[loss=3.008, NarTop10Accuracy=0.7301, over 6714.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6871, over 5914.53 frames. ], batch size: 14, lr: 5.70e-03 +2024-08-06 17:45:17,294 INFO [trainer.py:765] (7/8) Epoch 15, batch 1800, train_loss[loss=3.181, NarTop10Accuracy=0.6871, over 7011.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6883, over 5957.57 frames. ], batch size: 22, lr: 5.69e-03 +2024-08-06 17:45:43,679 INFO [trainer.py:765] (7/8) Epoch 15, batch 1900, train_loss[loss=3.133, NarTop10Accuracy=0.7038, over 5961.00 frames. ], tot_loss[loss=3.215, NarTop10Accuracy=0.6829, over 6016.40 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 17:45:53,541 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:46:01,743 INFO [trainer.py:811] (7/8) Epoch 15, validation: loss=3.006, NarTop10Accuracy=0.725, over 1905321.00 frames. +2024-08-06 17:46:01,743 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:46:02,217 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.004e+02 2.149e+02 2.324e+02 3.721e+02, threshold=4.298e+02, percent-clipped=0.0 +2024-08-06 17:46:17,372 INFO [trainer.py:765] (7/8) Epoch 15, batch 2000, train_loss[loss=3.226, NarTop10Accuracy=0.6789, over 6333.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.684, over 5998.71 frames. ], batch size: 52, lr: 5.67e-03 +2024-08-06 17:46:42,773 INFO [trainer.py:765] (7/8) Epoch 15, batch 2100, train_loss[loss=3.256, NarTop10Accuracy=0.6742, over 4833.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6853, over 5973.07 frames. ], batch size: 5, lr: 5.67e-03 +2024-08-06 17:47:08,033 INFO [trainer.py:765] (7/8) Epoch 15, batch 2200, train_loss[loss=2.933, NarTop10Accuracy=0.7423, over 7218.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6837, over 6008.79 frames. ], batch size: 31, lr: 5.66e-03 +2024-08-06 17:47:33,292 INFO [trainer.py:765] (7/8) Epoch 15, batch 2300, train_loss[loss=3.564, NarTop10Accuracy=0.6108, over 5619.00 frames. ], tot_loss[loss=3.212, NarTop10Accuracy=0.6832, over 6035.68 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 17:47:57,640 INFO [trainer.py:765] (7/8) Epoch 15, batch 2400, train_loss[loss=3.322, NarTop10Accuracy=0.6625, over 5190.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6885, over 5768.49 frames. ], batch size: 7, lr: 5.65e-03 +2024-08-06 17:48:21,162 INFO [trainer.py:765] (7/8) Epoch 15, batch 2500, train_loss[loss=2.909, NarTop10Accuracy=0.738, over 5079.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6921, over 5469.49 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 17:48:41,220 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 17:49:41,222 INFO [trainer.py:765] (7/8) Epoch 16, batch 100, train_loss[loss=3.485, NarTop10Accuracy=0.6287, over 7815.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6969, over 2364.28 frames. 
], batch size: 32, lr: 5.45e-03 +2024-08-06 17:50:12,158 INFO [trainer.py:765] (7/8) Epoch 16, batch 200, train_loss[loss=2.873, NarTop10Accuracy=0.7493, over 6948.00 frames. ], tot_loss[loss=3.197, NarTop10Accuracy=0.687, over 3858.91 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 17:50:45,159 INFO [trainer.py:765] (7/8) Epoch 16, batch 300, train_loss[loss=3.151, NarTop10Accuracy=0.6947, over 7122.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6885, over 4673.96 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 17:51:15,976 INFO [trainer.py:765] (7/8) Epoch 16, batch 400, train_loss[loss=3.414, NarTop10Accuracy=0.6372, over 5100.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6879, over 5116.52 frames. ], batch size: 7, lr: 5.43e-03 +2024-08-06 17:51:50,324 INFO [trainer.py:765] (7/8) Epoch 16, batch 500, train_loss[loss=2.99, NarTop10Accuracy=0.7397, over 6225.00 frames. ], tot_loss[loss=3.185, NarTop10Accuracy=0.6891, over 5388.69 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 17:52:24,252 INFO [trainer.py:765] (7/8) Epoch 16, batch 600, train_loss[loss=2.938, NarTop10Accuracy=0.7439, over 5769.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.688, over 5654.65 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 17:52:55,387 INFO [trainer.py:765] (7/8) Epoch 16, batch 700, train_loss[loss=2.99, NarTop10Accuracy=0.7294, over 5046.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.688, over 5715.45 frames. ], batch size: 6, lr: 5.41e-03 +2024-08-06 17:53:33,816 INFO [trainer.py:765] (7/8) Epoch 16, batch 800, train_loss[loss=3.285, NarTop10Accuracy=0.673, over 5115.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6894, over 5760.78 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 17:54:03,923 INFO [trainer.py:765] (7/8) Epoch 16, batch 900, train_loss[loss=3.256, NarTop10Accuracy=0.6768, over 6045.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.692, over 5787.84 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 17:54:37,608 INFO [trainer.py:765] (7/8) Epoch 16, batch 1000, train_loss[loss=2.978, NarTop10Accuracy=0.7275, over 6573.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6939, over 5912.69 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 17:55:17,197 INFO [trainer.py:765] (7/8) Epoch 16, batch 1100, train_loss[loss=3.138, NarTop10Accuracy=0.6962, over 6609.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6876, over 5958.90 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 17:55:46,210 INFO [trainer.py:765] (7/8) Epoch 16, batch 1200, train_loss[loss=3.385, NarTop10Accuracy=0.6453, over 7188.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6866, over 5942.01 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 17:56:22,776 INFO [trainer.py:765] (7/8) Epoch 16, batch 1300, train_loss[loss=3.433, NarTop10Accuracy=0.6395, over 5109.00 frames. ], tot_loss[loss=3.192, NarTop10Accuracy=0.6873, over 5989.95 frames. ], batch size: 6, lr: 5.37e-03 +2024-08-06 17:56:44,648 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 17:56:53,428 INFO [trainer.py:811] (7/8) Epoch 16, validation: loss=3.112, NarTop10Accuracy=0.703, over 1905321.00 frames. 
+2024-08-06 17:56:53,429 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 17:56:54,007 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 1.974e+02 2.136e+02 2.310e+02 5.351e+02, threshold=4.271e+02, percent-clipped=0.2 +2024-08-06 17:57:06,171 INFO [trainer.py:765] (7/8) Epoch 16, batch 1400, train_loss[loss=3.209, NarTop10Accuracy=0.6872, over 6096.00 frames. ], tot_loss[loss=3.186, NarTop10Accuracy=0.6887, over 6018.76 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 17:57:34,033 INFO [trainer.py:765] (7/8) Epoch 16, batch 1500, train_loss[loss=3.239, NarTop10Accuracy=0.6804, over 6225.00 frames. ], tot_loss[loss=3.182, NarTop10Accuracy=0.6896, over 5934.26 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 17:58:01,775 INFO [trainer.py:765] (7/8) Epoch 16, batch 1600, train_loss[loss=2.968, NarTop10Accuracy=0.7373, over 7188.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6904, over 5929.32 frames. ], batch size: 22, lr: 5.35e-03 +2024-08-06 17:58:28,475 INFO [trainer.py:765] (7/8) Epoch 16, batch 1700, train_loss[loss=2.964, NarTop10Accuracy=0.7398, over 6579.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6866, over 5913.34 frames. ], batch size: 14, lr: 5.34e-03 +2024-08-06 17:58:54,976 INFO [trainer.py:765] (7/8) Epoch 16, batch 1800, train_loss[loss=3.05, NarTop10Accuracy=0.7213, over 7344.00 frames. ], tot_loss[loss=3.184, NarTop10Accuracy=0.6893, over 5975.07 frames. ], batch size: 23, lr: 5.33e-03 +2024-08-06 17:59:21,360 INFO [trainer.py:765] (7/8) Epoch 16, batch 1900, train_loss[loss=3.397, NarTop10Accuracy=0.6471, over 6486.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6846, over 6011.08 frames. ], batch size: 58, lr: 5.33e-03 +2024-08-06 17:59:46,857 INFO [trainer.py:765] (7/8) Epoch 16, batch 2000, train_loss[loss=3.083, NarTop10Accuracy=0.7159, over 5799.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6915, over 5987.46 frames. ], batch size: 50, lr: 5.32e-03 +2024-08-06 18:00:12,117 INFO [trainer.py:765] (7/8) Epoch 16, batch 2100, train_loss[loss=3.676, NarTop10Accuracy=0.5803, over 3969.00 frames. ], tot_loss[loss=3.201, NarTop10Accuracy=0.6859, over 5976.73 frames. ], batch size: 4, lr: 5.32e-03 +2024-08-06 18:00:37,333 INFO [trainer.py:765] (7/8) Epoch 16, batch 2200, train_loss[loss=3.204, NarTop10Accuracy=0.695, over 7158.00 frames. ], tot_loss[loss=3.208, NarTop10Accuracy=0.6845, over 6019.45 frames. ], batch size: 31, lr: 5.31e-03 +2024-08-06 18:01:02,502 INFO [trainer.py:765] (7/8) Epoch 16, batch 2300, train_loss[loss=3.03, NarTop10Accuracy=0.7247, over 5694.00 frames. ], tot_loss[loss=3.209, NarTop10Accuracy=0.684, over 6041.75 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 18:01:26,883 INFO [trainer.py:765] (7/8) Epoch 16, batch 2400, train_loss[loss=3.069, NarTop10Accuracy=0.7172, over 5058.00 frames. ], tot_loss[loss=3.193, NarTop10Accuracy=0.6876, over 5782.82 frames. ], batch size: 7, lr: 5.30e-03 +2024-08-06 18:01:50,406 INFO [trainer.py:765] (7/8) Epoch 16, batch 2500, train_loss[loss=3.05, NarTop10Accuracy=0.7224, over 5100.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6935, over 5481.86 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 18:02:10,737 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 18:03:08,531 INFO [trainer.py:765] (7/8) Epoch 17, batch 100, train_loss[loss=3.092, NarTop10Accuracy=0.7072, over 7338.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.7, over 2352.40 frames. 
], batch size: 31, lr: 5.12e-03 +2024-08-06 18:03:45,145 INFO [trainer.py:765] (7/8) Epoch 17, batch 200, train_loss[loss=3.485, NarTop10Accuracy=0.6214, over 6717.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6973, over 3846.94 frames. ], batch size: 17, lr: 5.12e-03 +2024-08-06 18:04:19,591 INFO [trainer.py:765] (7/8) Epoch 17, batch 300, train_loss[loss=3.326, NarTop10Accuracy=0.658, over 7122.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6931, over 4643.88 frames. ], batch size: 22, lr: 5.11e-03 +2024-08-06 18:04:48,402 INFO [trainer.py:765] (7/8) Epoch 17, batch 400, train_loss[loss=3.227, NarTop10Accuracy=0.6771, over 5217.00 frames. ], tot_loss[loss=3.174, NarTop10Accuracy=0.6923, over 5093.64 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 18:05:24,680 INFO [trainer.py:765] (7/8) Epoch 17, batch 500, train_loss[loss=2.942, NarTop10Accuracy=0.7412, over 6147.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6959, over 5374.72 frames. ], batch size: 11, lr: 5.10e-03 +2024-08-06 18:05:58,739 INFO [trainer.py:765] (7/8) Epoch 17, batch 600, train_loss[loss=3.032, NarTop10Accuracy=0.7228, over 5697.00 frames. ], tot_loss[loss=3.171, NarTop10Accuracy=0.6923, over 5631.62 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 18:06:32,475 INFO [trainer.py:765] (7/8) Epoch 17, batch 700, train_loss[loss=3.042, NarTop10Accuracy=0.7149, over 5247.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6919, over 5711.95 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:02,725 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 18:07:10,763 INFO [trainer.py:811] (7/8) Epoch 17, validation: loss=3.018, NarTop10Accuracy=0.7223, over 1905321.00 frames. +2024-08-06 18:07:10,764 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 18:07:11,312 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.005e+02 2.161e+02 2.341e+02 3.806e+02, threshold=4.323e+02, percent-clipped=0.0 +2024-08-06 18:07:14,354 INFO [trainer.py:765] (7/8) Epoch 17, batch 800, train_loss[loss=3.097, NarTop10Accuracy=0.7101, over 5013.00 frames. ], tot_loss[loss=3.177, NarTop10Accuracy=0.6906, over 5784.21 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 18:07:49,721 INFO [trainer.py:765] (7/8) Epoch 17, batch 900, train_loss[loss=3.504, NarTop10Accuracy=0.6231, over 6138.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6949, over 5806.17 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 18:08:21,598 INFO [trainer.py:765] (7/8) Epoch 17, batch 1000, train_loss[loss=3.366, NarTop10Accuracy=0.6604, over 6852.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6922, over 5906.11 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 18:09:03,106 INFO [trainer.py:765] (7/8) Epoch 17, batch 1100, train_loss[loss=2.918, NarTop10Accuracy=0.7487, over 6843.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6924, over 5927.78 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 18:09:36,746 INFO [trainer.py:765] (7/8) Epoch 17, batch 1200, train_loss[loss=3.164, NarTop10Accuracy=0.6931, over 7122.00 frames. ], tot_loss[loss=3.173, NarTop10Accuracy=0.6911, over 5934.70 frames. ], batch size: 31, lr: 5.06e-03 +2024-08-06 18:10:10,688 INFO [trainer.py:765] (7/8) Epoch 17, batch 1300, train_loss[loss=3.38, NarTop10Accuracy=0.6451, over 4920.00 frames. ], tot_loss[loss=3.176, NarTop10Accuracy=0.6904, over 5991.19 frames. 
], batch size: 6, lr: 5.05e-03 +2024-08-06 18:10:48,027 INFO [trainer.py:765] (7/8) Epoch 17, batch 1400, train_loss[loss=3.216, NarTop10Accuracy=0.6795, over 6144.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6895, over 6009.19 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 18:11:19,105 INFO [trainer.py:765] (7/8) Epoch 17, batch 1500, train_loss[loss=3.382, NarTop10Accuracy=0.6397, over 5979.00 frames. ], tot_loss[loss=3.169, NarTop10Accuracy=0.6923, over 5941.77 frames. ], batch size: 50, lr: 5.04e-03 +2024-08-06 18:11:46,855 INFO [trainer.py:765] (7/8) Epoch 17, batch 1600, train_loss[loss=3.052, NarTop10Accuracy=0.7154, over 6936.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6945, over 5924.73 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 18:12:13,509 INFO [trainer.py:765] (7/8) Epoch 17, batch 1700, train_loss[loss=3.451, NarTop10Accuracy=0.6288, over 6123.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6916, over 5935.75 frames. ], batch size: 13, lr: 5.03e-03 +2024-08-06 18:12:40,002 INFO [trainer.py:765] (7/8) Epoch 17, batch 1800, train_loss[loss=2.935, NarTop10Accuracy=0.7377, over 7185.00 frames. ], tot_loss[loss=3.179, NarTop10Accuracy=0.6897, over 5982.36 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 18:13:06,380 INFO [trainer.py:765] (7/8) Epoch 17, batch 1900, train_loss[loss=3.14, NarTop10Accuracy=0.6938, over 5589.00 frames. ], tot_loss[loss=3.188, NarTop10Accuracy=0.6882, over 6021.57 frames. ], batch size: 50, lr: 5.01e-03 +2024-08-06 18:13:31,923 INFO [trainer.py:765] (7/8) Epoch 17, batch 2000, train_loss[loss=3.589, NarTop10Accuracy=0.6068, over 5697.00 frames. ], tot_loss[loss=3.162, NarTop10Accuracy=0.6936, over 5997.42 frames. ], batch size: 52, lr: 5.01e-03 +2024-08-06 18:13:57,228 INFO [trainer.py:765] (7/8) Epoch 17, batch 2100, train_loss[loss=3.094, NarTop10Accuracy=0.7189, over 3870.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.6916, over 5970.49 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 18:14:22,434 INFO [trainer.py:765] (7/8) Epoch 17, batch 2200, train_loss[loss=3.021, NarTop10Accuracy=0.7332, over 7659.00 frames. ], tot_loss[loss=3.196, NarTop10Accuracy=0.6864, over 6003.08 frames. ], batch size: 31, lr: 5.00e-03 +2024-08-06 18:14:47,592 INFO [trainer.py:765] (7/8) Epoch 17, batch 2300, train_loss[loss=2.959, NarTop10Accuracy=0.73, over 5673.00 frames. ], tot_loss[loss=3.19, NarTop10Accuracy=0.6878, over 6021.38 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 18:15:12,061 INFO [trainer.py:765] (7/8) Epoch 17, batch 2400, train_loss[loss=3.083, NarTop10Accuracy=0.7111, over 5130.00 frames. ], tot_loss[loss=3.189, NarTop10Accuracy=0.6877, over 5765.27 frames. ], batch size: 7, lr: 4.99e-03 +2024-08-06 18:15:35,515 INFO [trainer.py:765] (7/8) Epoch 17, batch 2500, train_loss[loss=2.757, NarTop10Accuracy=0.7759, over 5211.00 frames. ], tot_loss[loss=3.172, NarTop10Accuracy=0.691, over 5463.09 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 18:15:55,360 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 18:16:49,908 INFO [trainer.py:765] (7/8) Epoch 18, batch 100, train_loss[loss=3.096, NarTop10Accuracy=0.7121, over 7041.00 frames. ], tot_loss[loss=3.181, NarTop10Accuracy=0.6905, over 2384.36 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 18:17:24,749 INFO [trainer.py:765] (7/8) Epoch 18, batch 200, train_loss[loss=2.967, NarTop10Accuracy=0.732, over 6957.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6937, over 3876.03 frames. 
], batch size: 17, lr: 4.83e-03 +2024-08-06 18:17:27,716 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 18:17:35,927 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=3.062, NarTop10Accuracy=0.7137, over 1905321.00 frames. +2024-08-06 18:17:35,927 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 18:17:36,529 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.024e+02 2.164e+02 2.334e+02 7.024e+02, threshold=4.329e+02, percent-clipped=0.1 +2024-08-06 18:18:06,912 INFO [trainer.py:765] (7/8) Epoch 18, batch 300, train_loss[loss=3.456, NarTop10Accuracy=0.6349, over 7215.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6928, over 4675.78 frames. ], batch size: 22, lr: 4.82e-03 +2024-08-06 18:18:38,183 INFO [trainer.py:765] (7/8) Epoch 18, batch 400, train_loss[loss=3.352, NarTop10Accuracy=0.6522, over 5733.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6961, over 5125.25 frames. ], batch size: 8, lr: 4.81e-03 +2024-08-06 18:19:13,599 INFO [trainer.py:765] (7/8) Epoch 18, batch 500, train_loss[loss=2.99, NarTop10Accuracy=0.731, over 6042.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6962, over 5394.47 frames. ], batch size: 11, lr: 4.81e-03 +2024-08-06 18:19:48,151 INFO [trainer.py:765] (7/8) Epoch 18, batch 600, train_loss[loss=3.463, NarTop10Accuracy=0.6274, over 5739.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.696, over 5639.57 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 18:20:23,870 INFO [trainer.py:765] (7/8) Epoch 18, batch 700, train_loss[loss=3.331, NarTop10Accuracy=0.6533, over 4875.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.6945, over 5708.38 frames. ], batch size: 6, lr: 4.80e-03 +2024-08-06 18:21:01,026 INFO [trainer.py:765] (7/8) Epoch 18, batch 800, train_loss[loss=2.759, NarTop10Accuracy=0.7739, over 5034.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6915, over 5788.78 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 18:21:32,409 INFO [trainer.py:765] (7/8) Epoch 18, batch 900, train_loss[loss=2.992, NarTop10Accuracy=0.729, over 6363.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6957, over 5801.80 frames. ], batch size: 13, lr: 4.79e-03 +2024-08-06 18:22:11,192 INFO [trainer.py:765] (7/8) Epoch 18, batch 1000, train_loss[loss=2.957, NarTop10Accuracy=0.7335, over 6687.00 frames. ], tot_loss[loss=3.167, NarTop10Accuracy=0.6925, over 5904.16 frames. ], batch size: 14, lr: 4.78e-03 +2024-08-06 18:22:46,969 INFO [trainer.py:765] (7/8) Epoch 18, batch 1100, train_loss[loss=3.378, NarTop10Accuracy=0.6458, over 6762.00 frames. ], tot_loss[loss=3.165, NarTop10Accuracy=0.6928, over 5954.59 frames. ], batch size: 17, lr: 4.78e-03 +2024-08-06 18:23:18,605 INFO [trainer.py:765] (7/8) Epoch 18, batch 1200, train_loss[loss=3.67, NarTop10Accuracy=0.5889, over 7443.00 frames. ], tot_loss[loss=3.178, NarTop10Accuracy=0.6899, over 5941.77 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 18:24:00,099 INFO [trainer.py:765] (7/8) Epoch 18, batch 1300, train_loss[loss=2.899, NarTop10Accuracy=0.7487, over 5019.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6935, over 6009.95 frames. ], batch size: 6, lr: 4.77e-03 +2024-08-06 18:24:29,574 INFO [trainer.py:765] (7/8) Epoch 18, batch 1400, train_loss[loss=2.955, NarTop10Accuracy=0.7424, over 6051.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6937, over 6032.54 frames. 
], batch size: 11, lr: 4.76e-03 +2024-08-06 18:25:00,307 INFO [trainer.py:765] (7/8) Epoch 18, batch 1500, train_loss[loss=3.124, NarTop10Accuracy=0.7048, over 5967.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.695, over 5958.76 frames. ], batch size: 50, lr: 4.76e-03 +2024-08-06 18:25:28,085 INFO [trainer.py:765] (7/8) Epoch 18, batch 1600, train_loss[loss=3.012, NarTop10Accuracy=0.7264, over 7128.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6933, over 5933.80 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 18:25:54,688 INFO [trainer.py:765] (7/8) Epoch 18, batch 1700, train_loss[loss=3.134, NarTop10Accuracy=0.7055, over 6528.00 frames. ], tot_loss[loss=3.157, NarTop10Accuracy=0.6942, over 5916.73 frames. ], batch size: 14, lr: 4.75e-03 +2024-08-06 18:26:21,196 INFO [trainer.py:765] (7/8) Epoch 18, batch 1800, train_loss[loss=3.426, NarTop10Accuracy=0.6324, over 7278.00 frames. ], tot_loss[loss=3.156, NarTop10Accuracy=0.6941, over 5976.35 frames. ], batch size: 23, lr: 4.74e-03 +2024-08-06 18:26:47,567 INFO [trainer.py:765] (7/8) Epoch 18, batch 1900, train_loss[loss=3.17, NarTop10Accuracy=0.6958, over 6099.00 frames. ], tot_loss[loss=3.17, NarTop10Accuracy=0.6916, over 6034.37 frames. ], batch size: 50, lr: 4.74e-03 +2024-08-06 18:27:13,176 INFO [trainer.py:765] (7/8) Epoch 18, batch 2000, train_loss[loss=3.143, NarTop10Accuracy=0.7025, over 6267.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6923, over 6021.81 frames. ], batch size: 50, lr: 4.73e-03 +2024-08-06 18:27:38,529 INFO [trainer.py:765] (7/8) Epoch 18, batch 2100, train_loss[loss=3.32, NarTop10Accuracy=0.6537, over 3918.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6941, over 5999.59 frames. ], batch size: 4, lr: 4.73e-03 +2024-08-06 18:28:03,812 INFO [trainer.py:765] (7/8) Epoch 18, batch 2200, train_loss[loss=2.986, NarTop10Accuracy=0.7299, over 7029.00 frames. ], tot_loss[loss=3.163, NarTop10Accuracy=0.6935, over 6023.94 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 18:28:06,571 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 18:28:14,649 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=3.028, NarTop10Accuracy=0.7201, over 1905321.00 frames. +2024-08-06 18:28:14,650 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 18:28:15,147 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.054e+02 2.220e+02 2.384e+02 3.992e+02, threshold=4.441e+02, percent-clipped=0.0 +2024-08-06 18:28:37,096 INFO [trainer.py:765] (7/8) Epoch 18, batch 2300, train_loss[loss=2.736, NarTop10Accuracy=0.787, over 5814.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.691, over 6040.00 frames. ], batch size: 9, lr: 4.72e-03 +2024-08-06 18:29:01,592 INFO [trainer.py:765] (7/8) Epoch 18, batch 2400, train_loss[loss=2.884, NarTop10Accuracy=0.7402, over 5070.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6957, over 5775.97 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:25,027 INFO [trainer.py:765] (7/8) Epoch 18, batch 2500, train_loss[loss=2.981, NarTop10Accuracy=0.7269, over 5328.00 frames. ], tot_loss[loss=3.129, NarTop10Accuracy=0.6995, over 5467.81 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 18:29:45,270 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 18:30:41,232 INFO [trainer.py:765] (7/8) Epoch 19, batch 100, train_loss[loss=2.966, NarTop10Accuracy=0.7307, over 7503.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6938, over 2366.76 frames. 
], batch size: 32, lr: 4.57e-03 +2024-08-06 18:31:15,603 INFO [trainer.py:765] (7/8) Epoch 19, batch 200, train_loss[loss=2.857, NarTop10Accuracy=0.753, over 6816.00 frames. ], tot_loss[loss=3.159, NarTop10Accuracy=0.6938, over 3848.53 frames. ], batch size: 17, lr: 4.57e-03 +2024-08-06 18:31:47,468 INFO [trainer.py:765] (7/8) Epoch 19, batch 300, train_loss[loss=3.417, NarTop10Accuracy=0.6451, over 7275.00 frames. ], tot_loss[loss=3.14, NarTop10Accuracy=0.698, over 4649.18 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 18:32:20,355 INFO [trainer.py:765] (7/8) Epoch 19, batch 400, train_loss[loss=3.171, NarTop10Accuracy=0.6975, over 5259.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6971, over 5096.51 frames. ], batch size: 7, lr: 4.56e-03 +2024-08-06 18:32:50,335 INFO [trainer.py:765] (7/8) Epoch 19, batch 500, train_loss[loss=3.025, NarTop10Accuracy=0.7204, over 6012.00 frames. ], tot_loss[loss=3.137, NarTop10Accuracy=0.6984, over 5371.32 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 18:33:29,610 INFO [trainer.py:765] (7/8) Epoch 19, batch 600, train_loss[loss=2.921, NarTop10Accuracy=0.7388, over 5742.00 frames. ], tot_loss[loss=3.143, NarTop10Accuracy=0.6969, over 5639.91 frames. ], batch size: 9, lr: 4.55e-03 +2024-08-06 18:34:03,592 INFO [trainer.py:765] (7/8) Epoch 19, batch 700, train_loss[loss=2.802, NarTop10Accuracy=0.7763, over 5094.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6966, over 5719.34 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 18:34:35,179 INFO [trainer.py:765] (7/8) Epoch 19, batch 800, train_loss[loss=3.114, NarTop10Accuracy=0.6985, over 4362.00 frames. ], tot_loss[loss=3.153, NarTop10Accuracy=0.6954, over 5774.86 frames. ], batch size: 5, lr: 4.54e-03 +2024-08-06 18:35:10,263 INFO [trainer.py:765] (7/8) Epoch 19, batch 900, train_loss[loss=2.975, NarTop10Accuracy=0.7331, over 6702.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6972, over 5802.55 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 18:35:48,638 INFO [trainer.py:765] (7/8) Epoch 19, batch 1000, train_loss[loss=3.346, NarTop10Accuracy=0.6622, over 6729.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6964, over 5901.33 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 18:36:20,939 INFO [trainer.py:765] (7/8) Epoch 19, batch 1100, train_loss[loss=3.027, NarTop10Accuracy=0.7363, over 6873.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6942, over 5933.50 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 18:36:57,130 INFO [trainer.py:765] (7/8) Epoch 19, batch 1200, train_loss[loss=2.953, NarTop10Accuracy=0.7376, over 7320.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 5931.50 frames. ], batch size: 32, lr: 4.52e-03 +2024-08-06 18:37:35,315 INFO [trainer.py:765] (7/8) Epoch 19, batch 1300, train_loss[loss=2.773, NarTop10Accuracy=0.7719, over 4395.00 frames. ], tot_loss[loss=3.161, NarTop10Accuracy=0.6937, over 5996.48 frames. ], batch size: 5, lr: 4.51e-03 +2024-08-06 18:38:04,680 INFO [trainer.py:765] (7/8) Epoch 19, batch 1400, train_loss[loss=2.879, NarTop10Accuracy=0.7558, over 6048.00 frames. ], tot_loss[loss=3.164, NarTop10Accuracy=0.6929, over 6000.74 frames. ], batch size: 11, lr: 4.51e-03 +2024-08-06 18:38:34,551 INFO [trainer.py:765] (7/8) Epoch 19, batch 1500, train_loss[loss=3.369, NarTop10Accuracy=0.6585, over 5928.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6969, over 5953.15 frames. 
], batch size: 50, lr: 4.50e-03 +2024-08-06 18:39:02,311 INFO [trainer.py:765] (7/8) Epoch 19, batch 1600, train_loss[loss=3.336, NarTop10Accuracy=0.6555, over 7077.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6967, over 5959.05 frames. ], batch size: 22, lr: 4.50e-03 +2024-08-06 18:39:11,591 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 18:39:19,795 INFO [trainer.py:811] (7/8) Epoch 19, validation: loss=2.958, NarTop10Accuracy=0.7345, over 1905321.00 frames. +2024-08-06 18:39:19,795 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 18:39:20,378 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.194e+02 2.364e+02 6.410e+02, threshold=4.387e+02, percent-clipped=0.2 +2024-08-06 18:39:37,191 INFO [trainer.py:765] (7/8) Epoch 19, batch 1700, train_loss[loss=3.545, NarTop10Accuracy=0.6131, over 6255.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6961, over 5925.74 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 18:40:03,789 INFO [trainer.py:765] (7/8) Epoch 19, batch 1800, train_loss[loss=3.603, NarTop10Accuracy=0.6016, over 7125.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6953, over 5978.46 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 18:40:30,217 INFO [trainer.py:765] (7/8) Epoch 19, batch 1900, train_loss[loss=3.086, NarTop10Accuracy=0.7117, over 6378.00 frames. ], tot_loss[loss=3.15, NarTop10Accuracy=0.6956, over 6028.62 frames. ], batch size: 52, lr: 4.49e-03 +2024-08-06 18:40:55,793 INFO [trainer.py:765] (7/8) Epoch 19, batch 2000, train_loss[loss=3.349, NarTop10Accuracy=0.6532, over 6513.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6952, over 5997.64 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 18:41:21,183 INFO [trainer.py:765] (7/8) Epoch 19, batch 2100, train_loss[loss=3.009, NarTop10Accuracy=0.7213, over 3996.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6973, over 5976.71 frames. ], batch size: 4, lr: 4.48e-03 +2024-08-06 18:41:46,455 INFO [trainer.py:765] (7/8) Epoch 19, batch 2200, train_loss[loss=3.197, NarTop10Accuracy=0.6853, over 7419.00 frames. ], tot_loss[loss=3.151, NarTop10Accuracy=0.6958, over 6018.63 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 18:42:11,559 INFO [trainer.py:765] (7/8) Epoch 19, batch 2300, train_loss[loss=3.132, NarTop10Accuracy=0.6989, over 5712.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 6029.06 frames. ], batch size: 9, lr: 4.47e-03 +2024-08-06 18:42:35,986 INFO [trainer.py:765] (7/8) Epoch 19, batch 2400, train_loss[loss=2.826, NarTop10Accuracy=0.7546, over 5088.00 frames. ], tot_loss[loss=3.147, NarTop10Accuracy=0.6967, over 5788.15 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:42:59,689 INFO [trainer.py:765] (7/8) Epoch 19, batch 2500, train_loss[loss=2.713, NarTop10Accuracy=0.778, over 5043.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7007, over 5491.02 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 18:43:19,345 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 18:44:22,974 INFO [trainer.py:765] (7/8) Epoch 20, batch 100, train_loss[loss=3.204, NarTop10Accuracy=0.6864, over 7194.00 frames. ], tot_loss[loss=3.175, NarTop10Accuracy=0.6906, over 2367.58 frames. ], batch size: 31, lr: 4.34e-03 +2024-08-06 18:44:58,379 INFO [trainer.py:765] (7/8) Epoch 20, batch 200, train_loss[loss=3.423, NarTop10Accuracy=0.6344, over 6819.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7004, over 3857.76 frames. 
], batch size: 17, lr: 4.33e-03 +2024-08-06 18:45:32,279 INFO [trainer.py:765] (7/8) Epoch 20, batch 300, train_loss[loss=3.408, NarTop10Accuracy=0.6385, over 6888.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7027, over 4663.88 frames. ], batch size: 22, lr: 4.33e-03 +2024-08-06 18:46:05,128 INFO [trainer.py:765] (7/8) Epoch 20, batch 400, train_loss[loss=2.84, NarTop10Accuracy=0.7613, over 5106.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7021, over 5101.36 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 18:46:35,770 INFO [trainer.py:765] (7/8) Epoch 20, batch 500, train_loss[loss=2.91, NarTop10Accuracy=0.7495, over 6036.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.6999, over 5375.02 frames. ], batch size: 11, lr: 4.32e-03 +2024-08-06 18:47:13,255 INFO [trainer.py:765] (7/8) Epoch 20, batch 600, train_loss[loss=2.965, NarTop10Accuracy=0.7254, over 5751.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7007, over 5646.12 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 18:47:44,481 INFO [trainer.py:765] (7/8) Epoch 20, batch 700, train_loss[loss=2.871, NarTop10Accuracy=0.7615, over 4290.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7028, over 5728.64 frames. ], batch size: 5, lr: 4.31e-03 +2024-08-06 18:48:21,016 INFO [trainer.py:765] (7/8) Epoch 20, batch 800, train_loss[loss=2.752, NarTop10Accuracy=0.7785, over 5187.00 frames. ], tot_loss[loss=3.126, NarTop10Accuracy=0.7003, over 5786.39 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 18:48:56,535 INFO [trainer.py:765] (7/8) Epoch 20, batch 900, train_loss[loss=2.907, NarTop10Accuracy=0.751, over 6423.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.701, over 5800.54 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 18:49:29,805 INFO [trainer.py:765] (7/8) Epoch 20, batch 1000, train_loss[loss=3.255, NarTop10Accuracy=0.6706, over 6570.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6958, over 5909.02 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 18:49:52,237 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 18:50:00,326 INFO [trainer.py:811] (7/8) Epoch 20, validation: loss=2.962, NarTop10Accuracy=0.7336, over 1905321.00 frames. +2024-08-06 18:50:00,327 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 18:50:00,875 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.061e+02 2.223e+02 2.401e+02 3.871e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 18:50:15,427 INFO [trainer.py:765] (7/8) Epoch 20, batch 1100, train_loss[loss=3.191, NarTop10Accuracy=0.6885, over 6879.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6968, over 5931.50 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 18:50:53,776 INFO [trainer.py:765] (7/8) Epoch 20, batch 1200, train_loss[loss=2.941, NarTop10Accuracy=0.7376, over 7266.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6965, over 5936.47 frames. ], batch size: 32, lr: 4.29e-03 +2024-08-06 18:51:25,130 INFO [trainer.py:765] (7/8) Epoch 20, batch 1300, train_loss[loss=3.158, NarTop10Accuracy=0.6864, over 5010.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6974, over 6000.29 frames. ], batch size: 6, lr: 4.29e-03 +2024-08-06 18:51:59,315 INFO [trainer.py:765] (7/8) Epoch 20, batch 1400, train_loss[loss=2.972, NarTop10Accuracy=0.7252, over 6009.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.6987, over 6007.65 frames. 
], batch size: 11, lr: 4.28e-03 +2024-08-06 18:52:32,806 INFO [trainer.py:765] (7/8) Epoch 20, batch 1500, train_loss[loss=3.345, NarTop10Accuracy=0.6616, over 6354.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.6964, over 5950.98 frames. ], batch size: 50, lr: 4.28e-03 +2024-08-06 18:53:00,635 INFO [trainer.py:765] (7/8) Epoch 20, batch 1600, train_loss[loss=2.948, NarTop10Accuracy=0.7434, over 7209.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6959, over 5916.44 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 18:53:27,328 INFO [trainer.py:765] (7/8) Epoch 20, batch 1700, train_loss[loss=3.415, NarTop10Accuracy=0.6414, over 6720.00 frames. ], tot_loss[loss=3.146, NarTop10Accuracy=0.6966, over 5904.26 frames. ], batch size: 14, lr: 4.27e-03 +2024-08-06 18:53:53,851 INFO [trainer.py:765] (7/8) Epoch 20, batch 1800, train_loss[loss=3.118, NarTop10Accuracy=0.7061, over 7290.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6994, over 5965.84 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 18:54:20,316 INFO [trainer.py:765] (7/8) Epoch 20, batch 1900, train_loss[loss=3.082, NarTop10Accuracy=0.7153, over 5940.00 frames. ], tot_loss[loss=3.155, NarTop10Accuracy=0.695, over 6001.11 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:54:45,890 INFO [trainer.py:765] (7/8) Epoch 20, batch 2000, train_loss[loss=3.653, NarTop10Accuracy=0.5897, over 6102.00 frames. ], tot_loss[loss=3.16, NarTop10Accuracy=0.6937, over 5995.70 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 18:55:11,182 INFO [trainer.py:765] (7/8) Epoch 20, batch 2100, train_loss[loss=3.363, NarTop10Accuracy=0.6481, over 4842.00 frames. ], tot_loss[loss=3.148, NarTop10Accuracy=0.696, over 5970.58 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 18:55:36,414 INFO [trainer.py:765] (7/8) Epoch 20, batch 2200, train_loss[loss=2.881, NarTop10Accuracy=0.7531, over 7212.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6951, over 6005.16 frames. ], batch size: 31, lr: 4.25e-03 +2024-08-06 18:56:01,636 INFO [trainer.py:765] (7/8) Epoch 20, batch 2300, train_loss[loss=3.003, NarTop10Accuracy=0.7207, over 5715.00 frames. ], tot_loss[loss=3.168, NarTop10Accuracy=0.6922, over 6034.43 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 18:56:26,050 INFO [trainer.py:765] (7/8) Epoch 20, batch 2400, train_loss[loss=2.869, NarTop10Accuracy=0.7532, over 5148.00 frames. ], tot_loss[loss=3.152, NarTop10Accuracy=0.6953, over 5780.03 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:56:49,566 INFO [trainer.py:765] (7/8) Epoch 20, batch 2500, train_loss[loss=2.919, NarTop10Accuracy=0.7419, over 5046.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7025, over 5463.22 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 18:57:09,582 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 18:58:09,585 INFO [trainer.py:765] (7/8) Epoch 21, batch 100, train_loss[loss=3.321, NarTop10Accuracy=0.665, over 7629.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7043, over 2366.29 frames. ], batch size: 32, lr: 4.13e-03 +2024-08-06 18:58:40,418 INFO [trainer.py:765] (7/8) Epoch 21, batch 200, train_loss[loss=2.861, NarTop10Accuracy=0.7631, over 6687.00 frames. ], tot_loss[loss=3.122, NarTop10Accuracy=0.7015, over 3849.21 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 18:59:13,334 INFO [trainer.py:765] (7/8) Epoch 21, batch 300, train_loss[loss=2.831, NarTop10Accuracy=0.7584, over 7125.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7013, over 4669.50 frames. 
], batch size: 22, lr: 4.12e-03 +2024-08-06 18:59:48,151 INFO [trainer.py:765] (7/8) Epoch 21, batch 400, train_loss[loss=2.886, NarTop10Accuracy=0.7567, over 5040.00 frames. ], tot_loss[loss=3.105, NarTop10Accuracy=0.7046, over 5106.75 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 19:00:16,841 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:00:25,075 INFO [trainer.py:811] (7/8) Epoch 21, validation: loss=2.992, NarTop10Accuracy=0.7268, over 1905321.00 frames. +2024-08-06 19:00:25,076 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 19:00:25,622 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.071e+02 2.224e+02 2.387e+02 3.839e+02, threshold=4.447e+02, percent-clipped=0.0 +2024-08-06 19:00:29,890 INFO [trainer.py:765] (7/8) Epoch 21, batch 500, train_loss[loss=2.827, NarTop10Accuracy=0.7581, over 6183.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7026, over 5382.68 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 19:01:03,328 INFO [trainer.py:765] (7/8) Epoch 21, batch 600, train_loss[loss=3.382, NarTop10Accuracy=0.6417, over 5658.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.705, over 5641.98 frames. ], batch size: 9, lr: 4.11e-03 +2024-08-06 19:01:39,388 INFO [trainer.py:765] (7/8) Epoch 21, batch 700, train_loss[loss=2.838, NarTop10Accuracy=0.7588, over 5136.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7028, over 5729.39 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:18,047 INFO [trainer.py:765] (7/8) Epoch 21, batch 800, train_loss[loss=3.071, NarTop10Accuracy=0.7127, over 5112.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7007, over 5776.56 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 19:02:48,663 INFO [trainer.py:765] (7/8) Epoch 21, batch 900, train_loss[loss=2.943, NarTop10Accuracy=0.7348, over 6699.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7007, over 5796.95 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:03:25,800 INFO [trainer.py:765] (7/8) Epoch 21, batch 1000, train_loss[loss=3.09, NarTop10Accuracy=0.7077, over 6543.00 frames. ], tot_loss[loss=3.132, NarTop10Accuracy=0.6992, over 5889.21 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 19:04:07,206 INFO [trainer.py:765] (7/8) Epoch 21, batch 1100, train_loss[loss=3.446, NarTop10Accuracy=0.6386, over 6819.00 frames. ], tot_loss[loss=3.145, NarTop10Accuracy=0.6965, over 5941.68 frames. ], batch size: 17, lr: 4.09e-03 +2024-08-06 19:04:38,462 INFO [trainer.py:765] (7/8) Epoch 21, batch 1200, train_loss[loss=3.365, NarTop10Accuracy=0.6514, over 7116.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6997, over 5942.32 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 19:05:15,316 INFO [trainer.py:765] (7/8) Epoch 21, batch 1300, train_loss[loss=2.696, NarTop10Accuracy=0.789, over 5199.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7046, over 6005.52 frames. ], batch size: 6, lr: 4.08e-03 +2024-08-06 19:05:55,559 INFO [trainer.py:765] (7/8) Epoch 21, batch 1400, train_loss[loss=3.482, NarTop10Accuracy=0.633, over 6039.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7036, over 6011.71 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 19:06:23,600 INFO [trainer.py:765] (7/8) Epoch 21, batch 1500, train_loss[loss=3.275, NarTop10Accuracy=0.6643, over 6249.00 frames. ], tot_loss[loss=3.133, NarTop10Accuracy=0.6993, over 5965.56 frames. 
], batch size: 50, lr: 4.07e-03 +2024-08-06 19:06:51,461 INFO [trainer.py:765] (7/8) Epoch 21, batch 1600, train_loss[loss=3.004, NarTop10Accuracy=0.7259, over 7158.00 frames. ], tot_loss[loss=3.134, NarTop10Accuracy=0.6991, over 5940.59 frames. ], batch size: 22, lr: 4.07e-03 +2024-08-06 19:07:18,211 INFO [trainer.py:765] (7/8) Epoch 21, batch 1700, train_loss[loss=3.205, NarTop10Accuracy=0.6824, over 6540.00 frames. ], tot_loss[loss=3.141, NarTop10Accuracy=0.6978, over 5934.42 frames. ], batch size: 14, lr: 4.06e-03 +2024-08-06 19:07:44,809 INFO [trainer.py:765] (7/8) Epoch 21, batch 1800, train_loss[loss=2.797, NarTop10Accuracy=0.7716, over 7206.00 frames. ], tot_loss[loss=3.135, NarTop10Accuracy=0.699, over 5994.56 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 19:08:11,369 INFO [trainer.py:765] (7/8) Epoch 21, batch 1900, train_loss[loss=3.695, NarTop10Accuracy=0.5825, over 5793.00 frames. ], tot_loss[loss=3.144, NarTop10Accuracy=0.6972, over 6033.93 frames. ], batch size: 50, lr: 4.06e-03 +2024-08-06 19:08:37,105 INFO [trainer.py:765] (7/8) Epoch 21, batch 2000, train_loss[loss=3.506, NarTop10Accuracy=0.6297, over 5997.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.6997, over 6014.99 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 19:09:02,507 INFO [trainer.py:765] (7/8) Epoch 21, batch 2100, train_loss[loss=2.912, NarTop10Accuracy=0.743, over 3915.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6969, over 5988.29 frames. ], batch size: 4, lr: 4.05e-03 +2024-08-06 19:09:27,891 INFO [trainer.py:765] (7/8) Epoch 21, batch 2200, train_loss[loss=3, NarTop10Accuracy=0.7402, over 7056.00 frames. ], tot_loss[loss=3.149, NarTop10Accuracy=0.6957, over 6016.10 frames. ], batch size: 31, lr: 4.04e-03 +2024-08-06 19:09:53,222 INFO [trainer.py:765] (7/8) Epoch 21, batch 2300, train_loss[loss=2.926, NarTop10Accuracy=0.7447, over 5688.00 frames. ], tot_loss[loss=3.158, NarTop10Accuracy=0.694, over 6023.30 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 19:10:17,596 INFO [trainer.py:765] (7/8) Epoch 21, batch 2400, train_loss[loss=3.436, NarTop10Accuracy=0.635, over 5130.00 frames. ], tot_loss[loss=3.142, NarTop10Accuracy=0.6972, over 5768.75 frames. ], batch size: 7, lr: 4.04e-03 +2024-08-06 19:10:37,229 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:10:45,275 INFO [trainer.py:811] (7/8) Epoch 21, validation: loss=2.971, NarTop10Accuracy=0.7316, over 1905321.00 frames. +2024-08-06 19:10:45,275 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 19:10:45,741 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.100e+02 2.242e+02 2.407e+02 6.546e+02, threshold=4.484e+02, percent-clipped=0.1 +2024-08-06 19:10:49,272 INFO [trainer.py:765] (7/8) Epoch 21, batch 2500, train_loss[loss=3.372, NarTop10Accuracy=0.6538, over 5145.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7051, over 5483.22 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 19:11:08,992 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 19:12:09,054 INFO [trainer.py:765] (7/8) Epoch 22, batch 100, train_loss[loss=2.853, NarTop10Accuracy=0.762, over 7284.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7099, over 2367.91 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 19:12:44,462 INFO [trainer.py:765] (7/8) Epoch 22, batch 200, train_loss[loss=3.248, NarTop10Accuracy=0.6748, over 6861.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7069, over 3859.88 frames. 
], batch size: 17, lr: 3.93e-03 +2024-08-06 19:13:14,533 INFO [trainer.py:765] (7/8) Epoch 22, batch 300, train_loss[loss=2.912, NarTop10Accuracy=0.7441, over 7137.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7071, over 4650.27 frames. ], batch size: 22, lr: 3.93e-03 +2024-08-06 19:13:49,229 INFO [trainer.py:765] (7/8) Epoch 22, batch 400, train_loss[loss=2.846, NarTop10Accuracy=0.7442, over 4980.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7082, over 5100.47 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 19:14:24,850 INFO [trainer.py:765] (7/8) Epoch 22, batch 500, train_loss[loss=3.138, NarTop10Accuracy=0.6982, over 6189.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.708, over 5385.14 frames. ], batch size: 11, lr: 3.92e-03 +2024-08-06 19:14:55,701 INFO [trainer.py:765] (7/8) Epoch 22, batch 600, train_loss[loss=2.912, NarTop10Accuracy=0.7312, over 5727.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7013, over 5645.81 frames. ], batch size: 9, lr: 3.92e-03 +2024-08-06 19:15:30,867 INFO [trainer.py:765] (7/8) Epoch 22, batch 700, train_loss[loss=3.505, NarTop10Accuracy=0.6228, over 5046.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7005, over 5706.51 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 19:16:10,665 INFO [trainer.py:765] (7/8) Epoch 22, batch 800, train_loss[loss=2.975, NarTop10Accuracy=0.7295, over 4212.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7023, over 5768.12 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 19:16:40,952 INFO [trainer.py:765] (7/8) Epoch 22, batch 900, train_loss[loss=2.924, NarTop10Accuracy=0.7478, over 6657.00 frames. ], tot_loss[loss=3.116, NarTop10Accuracy=0.7025, over 5783.60 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 19:17:16,433 INFO [trainer.py:765] (7/8) Epoch 22, batch 1000, train_loss[loss=3.045, NarTop10Accuracy=0.7222, over 6222.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7034, over 5902.74 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 19:17:52,085 INFO [trainer.py:765] (7/8) Epoch 22, batch 1100, train_loss[loss=3, NarTop10Accuracy=0.7246, over 6693.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7011, over 5918.97 frames. ], batch size: 17, lr: 3.90e-03 +2024-08-06 19:18:25,927 INFO [trainer.py:765] (7/8) Epoch 22, batch 1200, train_loss[loss=2.902, NarTop10Accuracy=0.7471, over 7296.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.704, over 5915.46 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 19:19:01,253 INFO [trainer.py:765] (7/8) Epoch 22, batch 1300, train_loss[loss=2.887, NarTop10Accuracy=0.7497, over 5130.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7057, over 5985.62 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 19:19:33,317 INFO [trainer.py:765] (7/8) Epoch 22, batch 1400, train_loss[loss=2.821, NarTop10Accuracy=0.767, over 6132.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7022, over 6010.17 frames. ], batch size: 11, lr: 3.89e-03 +2024-08-06 19:20:03,830 INFO [trainer.py:765] (7/8) Epoch 22, batch 1500, train_loss[loss=3.541, NarTop10Accuracy=0.6224, over 6357.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7037, over 5934.92 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 19:20:31,647 INFO [trainer.py:765] (7/8) Epoch 22, batch 1600, train_loss[loss=3.145, NarTop10Accuracy=0.6953, over 6975.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.701, over 5919.47 frames. 
], batch size: 22, lr: 3.88e-03 +2024-08-06 19:20:58,418 INFO [trainer.py:765] (7/8) Epoch 22, batch 1700, train_loss[loss=3.165, NarTop10Accuracy=0.6885, over 6621.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7007, over 5915.21 frames. ], batch size: 14, lr: 3.88e-03 +2024-08-06 19:21:25,010 INFO [trainer.py:765] (7/8) Epoch 22, batch 1800, train_loss[loss=2.968, NarTop10Accuracy=0.7354, over 7053.00 frames. ], tot_loss[loss=3.118, NarTop10Accuracy=0.7018, over 5990.38 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 19:21:51,372 INFO [trainer.py:765] (7/8) Epoch 22, batch 1900, train_loss[loss=3.071, NarTop10Accuracy=0.7154, over 5742.00 frames. ], tot_loss[loss=3.138, NarTop10Accuracy=0.698, over 6018.61 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:21:53,110 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:22:01,088 INFO [trainer.py:811] (7/8) Epoch 22, validation: loss=3.009, NarTop10Accuracy=0.7241, over 1905321.00 frames. +2024-08-06 19:22:01,089 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27201MB +2024-08-06 19:22:01,575 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.114e+02 2.276e+02 2.445e+02 4.438e+02, threshold=4.551e+02, percent-clipped=0.0 +2024-08-06 19:22:24,819 INFO [trainer.py:765] (7/8) Epoch 22, batch 2000, train_loss[loss=3.459, NarTop10Accuracy=0.6346, over 6306.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7018, over 6006.22 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 19:22:50,041 INFO [trainer.py:765] (7/8) Epoch 22, batch 2100, train_loss[loss=2.795, NarTop10Accuracy=0.7569, over 3969.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7035, over 5987.54 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 19:23:15,230 INFO [trainer.py:765] (7/8) Epoch 22, batch 2200, train_loss[loss=2.951, NarTop10Accuracy=0.7316, over 7398.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.704, over 6023.76 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 19:23:40,315 INFO [trainer.py:765] (7/8) Epoch 22, batch 2300, train_loss[loss=3.028, NarTop10Accuracy=0.7158, over 5712.00 frames. ], tot_loss[loss=3.131, NarTop10Accuracy=0.7002, over 6033.13 frames. ], batch size: 9, lr: 3.86e-03 +2024-08-06 19:24:04,602 INFO [trainer.py:765] (7/8) Epoch 22, batch 2400, train_loss[loss=3.214, NarTop10Accuracy=0.6818, over 5208.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7018, over 5764.19 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:28,024 INFO [trainer.py:765] (7/8) Epoch 22, batch 2500, train_loss[loss=3.172, NarTop10Accuracy=0.6927, over 5070.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.704, over 5485.17 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 19:24:47,187 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 19:25:45,385 INFO [trainer.py:765] (7/8) Epoch 23, batch 100, train_loss[loss=2.987, NarTop10Accuracy=0.7296, over 7470.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.7002, over 2371.06 frames. ], batch size: 31, lr: 3.76e-03 +2024-08-06 19:26:21,309 INFO [trainer.py:765] (7/8) Epoch 23, batch 200, train_loss[loss=3.383, NarTop10Accuracy=0.659, over 6915.00 frames. ], tot_loss[loss=3.127, NarTop10Accuracy=0.7006, over 3862.39 frames. ], batch size: 17, lr: 3.76e-03 +2024-08-06 19:26:57,603 INFO [trainer.py:765] (7/8) Epoch 23, batch 300, train_loss[loss=2.944, NarTop10Accuracy=0.7344, over 7581.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7054, over 4657.72 frames. 
], batch size: 23, lr: 3.75e-03 +2024-08-06 19:27:26,540 INFO [trainer.py:765] (7/8) Epoch 23, batch 400, train_loss[loss=3.261, NarTop10Accuracy=0.679, over 5103.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7035, over 5110.18 frames. ], batch size: 7, lr: 3.75e-03 +2024-08-06 19:27:59,712 INFO [trainer.py:765] (7/8) Epoch 23, batch 500, train_loss[loss=3.213, NarTop10Accuracy=0.6743, over 6003.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7029, over 5385.07 frames. ], batch size: 11, lr: 3.75e-03 +2024-08-06 19:28:35,882 INFO [trainer.py:765] (7/8) Epoch 23, batch 600, train_loss[loss=3.318, NarTop10Accuracy=0.66, over 5748.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7047, over 5657.40 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 19:29:11,367 INFO [trainer.py:765] (7/8) Epoch 23, batch 700, train_loss[loss=3.354, NarTop10Accuracy=0.6473, over 5010.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7071, over 5735.92 frames. ], batch size: 6, lr: 3.74e-03 +2024-08-06 19:29:43,613 INFO [trainer.py:765] (7/8) Epoch 23, batch 800, train_loss[loss=3.007, NarTop10Accuracy=0.7289, over 4386.00 frames. ], tot_loss[loss=3.102, NarTop10Accuracy=0.7051, over 5791.00 frames. ], batch size: 5, lr: 3.74e-03 +2024-08-06 19:30:19,390 INFO [trainer.py:765] (7/8) Epoch 23, batch 900, train_loss[loss=3.234, NarTop10Accuracy=0.6779, over 6066.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7057, over 5775.86 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 19:30:58,195 INFO [trainer.py:765] (7/8) Epoch 23, batch 1000, train_loss[loss=3.007, NarTop10Accuracy=0.7251, over 6750.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7079, over 5892.64 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 19:31:31,520 INFO [trainer.py:765] (7/8) Epoch 23, batch 1100, train_loss[loss=3.114, NarTop10Accuracy=0.6998, over 6807.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7062, over 5923.35 frames. ], batch size: 17, lr: 3.73e-03 +2024-08-06 19:32:08,518 INFO [trainer.py:765] (7/8) Epoch 23, batch 1200, train_loss[loss=2.965, NarTop10Accuracy=0.7369, over 7371.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7035, over 5917.52 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 19:32:46,937 INFO [trainer.py:765] (7/8) Epoch 23, batch 1300, train_loss[loss=3.065, NarTop10Accuracy=0.7201, over 5001.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7039, over 5980.46 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 19:32:56,402 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:33:04,722 INFO [trainer.py:811] (7/8) Epoch 23, validation: loss=2.893, NarTop10Accuracy=0.7468, over 1905321.00 frames. +2024-08-06 19:33:04,723 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 19:33:05,262 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.108e+02 2.273e+02 2.457e+02 3.966e+02, threshold=4.546e+02, percent-clipped=0.0 +2024-08-06 19:33:27,407 INFO [trainer.py:765] (7/8) Epoch 23, batch 1400, train_loss[loss=2.67, NarTop10Accuracy=0.7932, over 6165.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7029, over 6010.98 frames. ], batch size: 11, lr: 3.72e-03 +2024-08-06 19:33:58,215 INFO [trainer.py:765] (7/8) Epoch 23, batch 1500, train_loss[loss=3.224, NarTop10Accuracy=0.6845, over 6252.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.706, over 5943.72 frames. 
], batch size: 50, lr: 3.71e-03 +2024-08-06 19:34:26,014 INFO [trainer.py:765] (7/8) Epoch 23, batch 1600, train_loss[loss=2.929, NarTop10Accuracy=0.7411, over 7053.00 frames. ], tot_loss[loss=3.106, NarTop10Accuracy=0.7046, over 5938.68 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 19:34:52,783 INFO [trainer.py:765] (7/8) Epoch 23, batch 1700, train_loss[loss=3.266, NarTop10Accuracy=0.6576, over 6198.00 frames. ], tot_loss[loss=3.121, NarTop10Accuracy=0.7014, over 5933.00 frames. ], batch size: 13, lr: 3.71e-03 +2024-08-06 19:35:19,261 INFO [trainer.py:765] (7/8) Epoch 23, batch 1800, train_loss[loss=2.913, NarTop10Accuracy=0.745, over 7173.00 frames. ], tot_loss[loss=3.114, NarTop10Accuracy=0.7027, over 5978.88 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 19:35:45,596 INFO [trainer.py:765] (7/8) Epoch 23, batch 1900, train_loss[loss=3.326, NarTop10Accuracy=0.659, over 5832.00 frames. ], tot_loss[loss=3.124, NarTop10Accuracy=0.7009, over 6012.02 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:11,170 INFO [trainer.py:765] (7/8) Epoch 23, batch 2000, train_loss[loss=3.61, NarTop10Accuracy=0.603, over 6441.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7033, over 5978.80 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 19:36:36,517 INFO [trainer.py:765] (7/8) Epoch 23, batch 2100, train_loss[loss=3.307, NarTop10Accuracy=0.6598, over 4620.00 frames. ], tot_loss[loss=3.113, NarTop10Accuracy=0.7029, over 5956.30 frames. ], batch size: 5, lr: 3.69e-03 +2024-08-06 19:37:01,908 INFO [trainer.py:765] (7/8) Epoch 23, batch 2200, train_loss[loss=3.167, NarTop10Accuracy=0.6942, over 7551.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6996, over 6022.55 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 19:37:27,060 INFO [trainer.py:765] (7/8) Epoch 23, batch 2300, train_loss[loss=2.976, NarTop10Accuracy=0.7315, over 5715.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.701, over 6019.88 frames. ], batch size: 9, lr: 3.69e-03 +2024-08-06 19:37:51,424 INFO [trainer.py:765] (7/8) Epoch 23, batch 2400, train_loss[loss=3.021, NarTop10Accuracy=0.7209, over 5118.00 frames. ], tot_loss[loss=3.12, NarTop10Accuracy=0.702, over 5769.05 frames. ], batch size: 7, lr: 3.69e-03 +2024-08-06 19:38:15,052 INFO [trainer.py:765] (7/8) Epoch 23, batch 2500, train_loss[loss=3.369, NarTop10Accuracy=0.6593, over 5100.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7063, over 5462.37 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 19:38:35,063 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 19:39:37,631 INFO [trainer.py:765] (7/8) Epoch 24, batch 100, train_loss[loss=3.529, NarTop10Accuracy=0.6205, over 7344.00 frames. ], tot_loss[loss=3.115, NarTop10Accuracy=0.7022, over 2367.39 frames. ], batch size: 31, lr: 3.60e-03 +2024-08-06 19:40:10,190 INFO [trainer.py:765] (7/8) Epoch 24, batch 200, train_loss[loss=2.93, NarTop10Accuracy=0.744, over 6774.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7066, over 3858.40 frames. ], batch size: 17, lr: 3.60e-03 +2024-08-06 19:40:40,555 INFO [trainer.py:765] (7/8) Epoch 24, batch 300, train_loss[loss=2.903, NarTop10Accuracy=0.7484, over 7041.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.706, over 4673.71 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 19:41:18,234 INFO [trainer.py:765] (7/8) Epoch 24, batch 400, train_loss[loss=2.922, NarTop10Accuracy=0.7516, over 5130.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.706, over 5115.71 frames. 
], batch size: 7, lr: 3.59e-03 +2024-08-06 19:41:50,322 INFO [trainer.py:765] (7/8) Epoch 24, batch 500, train_loss[loss=2.944, NarTop10Accuracy=0.7443, over 6141.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 5395.74 frames. ], batch size: 11, lr: 3.59e-03 +2024-08-06 19:42:21,451 INFO [trainer.py:765] (7/8) Epoch 24, batch 600, train_loss[loss=2.77, NarTop10Accuracy=0.7671, over 5718.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7078, over 5646.85 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 19:42:52,843 INFO [trainer.py:765] (7/8) Epoch 24, batch 700, train_loss[loss=2.902, NarTop10Accuracy=0.7435, over 5214.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7071, over 5718.23 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:43:17,381 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:43:25,410 INFO [trainer.py:811] (7/8) Epoch 24, validation: loss=3.021, NarTop10Accuracy=0.7204, over 1905321.00 frames. +2024-08-06 19:43:25,411 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 19:43:28,562 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.113e+02 2.282e+02 2.472e+02 2.357e+03, threshold=4.564e+02, percent-clipped=0.2 +2024-08-06 19:43:40,815 INFO [trainer.py:765] (7/8) Epoch 24, batch 800, train_loss[loss=2.755, NarTop10Accuracy=0.7736, over 5052.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7076, over 5803.12 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 19:44:11,410 INFO [trainer.py:765] (7/8) Epoch 24, batch 900, train_loss[loss=2.879, NarTop10Accuracy=0.7595, over 6570.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7088, over 5810.75 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:44:47,490 INFO [trainer.py:765] (7/8) Epoch 24, batch 1000, train_loss[loss=3.051, NarTop10Accuracy=0.7082, over 6699.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7072, over 5913.18 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 19:45:27,108 INFO [trainer.py:765] (7/8) Epoch 24, batch 1100, train_loss[loss=3.343, NarTop10Accuracy=0.6635, over 6690.00 frames. ], tot_loss[loss=3.098, NarTop10Accuracy=0.7058, over 5928.80 frames. ], batch size: 17, lr: 3.57e-03 +2024-08-06 19:45:58,437 INFO [trainer.py:765] (7/8) Epoch 24, batch 1200, train_loss[loss=2.927, NarTop10Accuracy=0.7511, over 7119.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7066, over 5925.18 frames. ], batch size: 31, lr: 3.57e-03 +2024-08-06 19:46:30,295 INFO [trainer.py:765] (7/8) Epoch 24, batch 1300, train_loss[loss=3.351, NarTop10Accuracy=0.6424, over 5022.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7066, over 5988.85 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 19:47:07,860 INFO [trainer.py:765] (7/8) Epoch 24, batch 1400, train_loss[loss=3.193, NarTop10Accuracy=0.6856, over 6012.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5998.92 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 19:47:40,957 INFO [trainer.py:765] (7/8) Epoch 24, batch 1500, train_loss[loss=3.373, NarTop10Accuracy=0.6561, over 6021.00 frames. ], tot_loss[loss=3.119, NarTop10Accuracy=0.7015, over 5948.64 frames. ], batch size: 50, lr: 3.56e-03 +2024-08-06 19:48:08,676 INFO [trainer.py:765] (7/8) Epoch 24, batch 1600, train_loss[loss=3.427, NarTop10Accuracy=0.6278, over 7191.00 frames. ], tot_loss[loss=3.123, NarTop10Accuracy=0.7003, over 5912.92 frames. 
], batch size: 22, lr: 3.55e-03 +2024-08-06 19:48:35,267 INFO [trainer.py:765] (7/8) Epoch 24, batch 1700, train_loss[loss=2.873, NarTop10Accuracy=0.7558, over 6579.00 frames. ], tot_loss[loss=3.128, NarTop10Accuracy=0.6996, over 5908.41 frames. ], batch size: 14, lr: 3.55e-03 +2024-08-06 19:49:01,638 INFO [trainer.py:765] (7/8) Epoch 24, batch 1800, train_loss[loss=2.973, NarTop10Accuracy=0.7226, over 7098.00 frames. ], tot_loss[loss=3.13, NarTop10Accuracy=0.6996, over 5948.82 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 19:49:28,042 INFO [trainer.py:765] (7/8) Epoch 24, batch 1900, train_loss[loss=3.537, NarTop10Accuracy=0.6168, over 6276.00 frames. ], tot_loss[loss=3.139, NarTop10Accuracy=0.6976, over 6009.39 frames. ], batch size: 50, lr: 3.55e-03 +2024-08-06 19:49:53,533 INFO [trainer.py:765] (7/8) Epoch 24, batch 2000, train_loss[loss=3.511, NarTop10Accuracy=0.6271, over 6135.00 frames. ], tot_loss[loss=3.11, NarTop10Accuracy=0.7036, over 5994.50 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 19:50:18,820 INFO [trainer.py:765] (7/8) Epoch 24, batch 2100, train_loss[loss=2.799, NarTop10Accuracy=0.7717, over 4770.00 frames. ], tot_loss[loss=3.109, NarTop10Accuracy=0.7036, over 5977.50 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 19:50:43,943 INFO [trainer.py:765] (7/8) Epoch 24, batch 2200, train_loss[loss=3.434, NarTop10Accuracy=0.633, over 7455.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7034, over 6020.84 frames. ], batch size: 31, lr: 3.54e-03 +2024-08-06 19:51:09,024 INFO [trainer.py:765] (7/8) Epoch 24, batch 2300, train_loss[loss=2.707, NarTop10Accuracy=0.7802, over 5760.00 frames. ], tot_loss[loss=3.108, NarTop10Accuracy=0.7043, over 6029.55 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 19:51:33,349 INFO [trainer.py:765] (7/8) Epoch 24, batch 2400, train_loss[loss=2.971, NarTop10Accuracy=0.7261, over 5208.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7068, over 5776.42 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:51:56,783 INFO [trainer.py:765] (7/8) Epoch 24, batch 2500, train_loss[loss=2.91, NarTop10Accuracy=0.7408, over 5232.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7109, over 5480.74 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 19:52:17,080 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 19:53:22,197 INFO [trainer.py:765] (7/8) Epoch 25, batch 100, train_loss[loss=3.437, NarTop10Accuracy=0.6449, over 7251.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.711, over 2372.47 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 19:53:47,261 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 19:53:55,329 INFO [trainer.py:811] (7/8) Epoch 25, validation: loss=2.96, NarTop10Accuracy=0.7332, over 1905321.00 frames. +2024-08-06 19:53:55,329 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 19:53:55,916 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.155e+02 2.306e+02 2.475e+02 6.485e+02, threshold=4.611e+02, percent-clipped=0.1 +2024-08-06 19:54:01,177 INFO [trainer.py:765] (7/8) Epoch 25, batch 200, train_loss[loss=2.823, NarTop10Accuracy=0.7616, over 7023.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7072, over 3870.09 frames. ], batch size: 17, lr: 3.45e-03 +2024-08-06 19:54:35,647 INFO [trainer.py:765] (7/8) Epoch 25, batch 300, train_loss[loss=3.174, NarTop10Accuracy=0.6962, over 7137.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7101, over 4662.27 frames. 
], batch size: 22, lr: 3.45e-03 +2024-08-06 19:55:12,958 INFO [trainer.py:765] (7/8) Epoch 25, batch 400, train_loss[loss=2.83, NarTop10Accuracy=0.7572, over 5109.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7082, over 5114.58 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 19:55:43,738 INFO [trainer.py:765] (7/8) Epoch 25, batch 500, train_loss[loss=2.75, NarTop10Accuracy=0.7749, over 6051.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.71, over 5389.96 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 19:56:14,815 INFO [trainer.py:765] (7/8) Epoch 25, batch 600, train_loss[loss=2.683, NarTop10Accuracy=0.7877, over 5808.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7094, over 5647.75 frames. ], batch size: 9, lr: 3.44e-03 +2024-08-06 19:56:55,497 INFO [trainer.py:765] (7/8) Epoch 25, batch 700, train_loss[loss=2.641, NarTop10Accuracy=0.8065, over 5127.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7095, over 5725.84 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:57:30,136 INFO [trainer.py:765] (7/8) Epoch 25, batch 800, train_loss[loss=2.93, NarTop10Accuracy=0.7338, over 4908.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.708, over 5787.87 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 19:58:00,679 INFO [trainer.py:765] (7/8) Epoch 25, batch 900, train_loss[loss=3.224, NarTop10Accuracy=0.6822, over 6189.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 5814.51 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:58:37,640 INFO [trainer.py:765] (7/8) Epoch 25, batch 1000, train_loss[loss=2.72, NarTop10Accuracy=0.7829, over 6153.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7063, over 5921.27 frames. ], batch size: 13, lr: 3.43e-03 +2024-08-06 19:59:14,856 INFO [trainer.py:765] (7/8) Epoch 25, batch 1100, train_loss[loss=3.467, NarTop10Accuracy=0.6225, over 6663.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.7053, over 5942.48 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 19:59:49,040 INFO [trainer.py:765] (7/8) Epoch 25, batch 1200, train_loss[loss=3.406, NarTop10Accuracy=0.6465, over 7248.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7067, over 5926.59 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 20:00:25,599 INFO [trainer.py:765] (7/8) Epoch 25, batch 1300, train_loss[loss=2.813, NarTop10Accuracy=0.7479, over 4263.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5985.01 frames. ], batch size: 5, lr: 3.42e-03 +2024-08-06 20:01:02,016 INFO [trainer.py:765] (7/8) Epoch 25, batch 1400, train_loss[loss=2.852, NarTop10Accuracy=0.755, over 6144.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7099, over 6007.74 frames. ], batch size: 11, lr: 3.42e-03 +2024-08-06 20:01:32,823 INFO [trainer.py:765] (7/8) Epoch 25, batch 1500, train_loss[loss=3.257, NarTop10Accuracy=0.6814, over 6324.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7085, over 5937.43 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 20:02:00,625 INFO [trainer.py:765] (7/8) Epoch 25, batch 1600, train_loss[loss=2.946, NarTop10Accuracy=0.738, over 6957.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7086, over 5923.05 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 20:02:27,360 INFO [trainer.py:765] (7/8) Epoch 25, batch 1700, train_loss[loss=3.062, NarTop10Accuracy=0.7134, over 6378.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7079, over 5915.67 frames. 
], batch size: 13, lr: 3.41e-03 +2024-08-06 20:02:53,854 INFO [trainer.py:765] (7/8) Epoch 25, batch 1800, train_loss[loss=3.365, NarTop10Accuracy=0.6439, over 7359.00 frames. ], tot_loss[loss=3.103, NarTop10Accuracy=0.705, over 5982.54 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 20:03:20,341 INFO [trainer.py:765] (7/8) Epoch 25, batch 1900, train_loss[loss=3.265, NarTop10Accuracy=0.6796, over 6402.00 frames. ], tot_loss[loss=3.117, NarTop10Accuracy=0.7025, over 6019.53 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:03:45,936 INFO [trainer.py:765] (7/8) Epoch 25, batch 2000, train_loss[loss=3.467, NarTop10Accuracy=0.6304, over 5820.00 frames. ], tot_loss[loss=3.125, NarTop10Accuracy=0.7005, over 6003.96 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 20:04:11,246 INFO [trainer.py:765] (7/8) Epoch 25, batch 2100, train_loss[loss=2.724, NarTop10Accuracy=0.784, over 4053.00 frames. ], tot_loss[loss=3.107, NarTop10Accuracy=0.7039, over 5958.05 frames. ], batch size: 4, lr: 3.40e-03 +2024-08-06 20:04:31,410 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:04:39,343 INFO [trainer.py:811] (7/8) Epoch 25, validation: loss=2.999, NarTop10Accuracy=0.7251, over 1905321.00 frames. +2024-08-06 20:04:39,344 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:04:39,840 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.185e+02 2.339e+02 2.507e+02 3.640e+02, threshold=4.678e+02, percent-clipped=0.0 +2024-08-06 20:04:44,512 INFO [trainer.py:765] (7/8) Epoch 25, batch 2200, train_loss[loss=3.287, NarTop10Accuracy=0.6681, over 7239.00 frames. ], tot_loss[loss=3.111, NarTop10Accuracy=0.7035, over 6013.47 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 20:05:09,645 INFO [trainer.py:765] (7/8) Epoch 25, batch 2300, train_loss[loss=3.01, NarTop10Accuracy=0.7292, over 5730.00 frames. ], tot_loss[loss=3.112, NarTop10Accuracy=0.7034, over 6021.42 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 20:05:34,141 INFO [trainer.py:765] (7/8) Epoch 25, batch 2400, train_loss[loss=2.926, NarTop10Accuracy=0.7518, over 5136.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7065, over 5791.16 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:05:57,845 INFO [trainer.py:765] (7/8) Epoch 25, batch 2500, train_loss[loss=2.832, NarTop10Accuracy=0.7645, over 5205.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7114, over 5493.88 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 20:06:18,068 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 20:07:19,303 INFO [trainer.py:765] (7/8) Epoch 26, batch 100, train_loss[loss=3.15, NarTop10Accuracy=0.7025, over 7239.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7088, over 2374.93 frames. ], batch size: 31, lr: 3.32e-03 +2024-08-06 20:07:52,381 INFO [trainer.py:765] (7/8) Epoch 26, batch 200, train_loss[loss=2.766, NarTop10Accuracy=0.7705, over 6786.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7077, over 3861.10 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 20:08:24,732 INFO [trainer.py:765] (7/8) Epoch 26, batch 300, train_loss[loss=2.95, NarTop10Accuracy=0.739, over 7137.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 4666.91 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 20:08:58,184 INFO [trainer.py:765] (7/8) Epoch 26, batch 400, train_loss[loss=3.039, NarTop10Accuracy=0.7286, over 5094.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7084, over 5124.72 frames. 
], batch size: 7, lr: 3.31e-03 +2024-08-06 20:09:33,146 INFO [trainer.py:765] (7/8) Epoch 26, batch 500, train_loss[loss=2.854, NarTop10Accuracy=0.75, over 6087.00 frames. ], tot_loss[loss=3.096, NarTop10Accuracy=0.7059, over 5374.75 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 20:10:03,889 INFO [trainer.py:765] (7/8) Epoch 26, batch 600, train_loss[loss=2.755, NarTop10Accuracy=0.78, over 6231.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7103, over 5632.77 frames. ], batch size: 10, lr: 3.30e-03 +2024-08-06 20:10:39,871 INFO [trainer.py:765] (7/8) Epoch 26, batch 700, train_loss[loss=3.213, NarTop10Accuracy=0.6907, over 4323.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7066, over 5732.11 frames. ], batch size: 5, lr: 3.30e-03 +2024-08-06 20:11:19,060 INFO [trainer.py:765] (7/8) Epoch 26, batch 800, train_loss[loss=2.981, NarTop10Accuracy=0.7361, over 5076.00 frames. ], tot_loss[loss=3.09, NarTop10Accuracy=0.7074, over 5805.22 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 20:11:49,314 INFO [trainer.py:765] (7/8) Epoch 26, batch 900, train_loss[loss=2.787, NarTop10Accuracy=0.7684, over 6597.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7084, over 5812.61 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:12:25,972 INFO [trainer.py:765] (7/8) Epoch 26, batch 1000, train_loss[loss=2.955, NarTop10Accuracy=0.7388, over 6567.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.707, over 5907.41 frames. ], batch size: 14, lr: 3.29e-03 +2024-08-06 20:13:06,376 INFO [trainer.py:765] (7/8) Epoch 26, batch 1100, train_loss[loss=3.264, NarTop10Accuracy=0.6679, over 6744.00 frames. ], tot_loss[loss=3.1, NarTop10Accuracy=0.7055, over 5944.69 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 20:13:37,535 INFO [trainer.py:765] (7/8) Epoch 26, batch 1200, train_loss[loss=3.34, NarTop10Accuracy=0.6588, over 7263.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7073, over 5937.22 frames. ], batch size: 31, lr: 3.29e-03 +2024-08-06 20:14:13,695 INFO [trainer.py:765] (7/8) Epoch 26, batch 1300, train_loss[loss=2.711, NarTop10Accuracy=0.7857, over 5055.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7074, over 6002.53 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 20:14:50,537 INFO [trainer.py:765] (7/8) Epoch 26, batch 1400, train_loss[loss=2.639, NarTop10Accuracy=0.7925, over 6108.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7077, over 6018.93 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 20:15:21,154 INFO [trainer.py:765] (7/8) Epoch 26, batch 1500, train_loss[loss=3.15, NarTop10Accuracy=0.6988, over 6270.00 frames. ], tot_loss[loss=3.092, NarTop10Accuracy=0.7067, over 5956.04 frames. ], batch size: 52, lr: 3.28e-03 +2024-08-06 20:15:48,978 INFO [trainer.py:765] (7/8) Epoch 26, batch 1600, train_loss[loss=2.983, NarTop10Accuracy=0.7238, over 7065.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7082, over 5927.41 frames. ], batch size: 22, lr: 3.28e-03 +2024-08-06 20:15:50,001 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:15:58,239 INFO [trainer.py:811] (7/8) Epoch 26, validation: loss=2.899, NarTop10Accuracy=0.7457, over 1905321.00 frames. 
+2024-08-06 20:15:58,239 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:15:58,779 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.166e+02 2.322e+02 2.511e+02 3.952e+02, threshold=4.644e+02, percent-clipped=0.0 +2024-08-06 20:16:23,952 INFO [trainer.py:765] (7/8) Epoch 26, batch 1700, train_loss[loss=3.153, NarTop10Accuracy=0.702, over 6111.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7121, over 5927.13 frames. ], batch size: 13, lr: 3.28e-03 +2024-08-06 20:16:50,427 INFO [trainer.py:765] (7/8) Epoch 26, batch 1800, train_loss[loss=2.893, NarTop10Accuracy=0.7545, over 6939.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7108, over 5993.97 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 20:17:16,840 INFO [trainer.py:765] (7/8) Epoch 26, batch 1900, train_loss[loss=3.059, NarTop10Accuracy=0.7197, over 5913.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7085, over 6024.86 frames. ], batch size: 52, lr: 3.27e-03 +2024-08-06 20:17:42,379 INFO [trainer.py:765] (7/8) Epoch 26, batch 2000, train_loss[loss=3.578, NarTop10Accuracy=0.6092, over 5772.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.707, over 6005.73 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 20:18:07,563 INFO [trainer.py:765] (7/8) Epoch 26, batch 2100, train_loss[loss=2.914, NarTop10Accuracy=0.7515, over 4932.00 frames. ], tot_loss[loss=3.099, NarTop10Accuracy=0.7055, over 5966.27 frames. ], batch size: 5, lr: 3.27e-03 +2024-08-06 20:18:32,777 INFO [trainer.py:765] (7/8) Epoch 26, batch 2200, train_loss[loss=2.836, NarTop10Accuracy=0.7532, over 7284.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.7069, over 6016.72 frames. ], batch size: 31, lr: 3.26e-03 +2024-08-06 20:18:57,897 INFO [trainer.py:765] (7/8) Epoch 26, batch 2300, train_loss[loss=3.134, NarTop10Accuracy=0.6979, over 5739.00 frames. ], tot_loss[loss=3.095, NarTop10Accuracy=0.7062, over 6020.06 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 20:19:22,205 INFO [trainer.py:765] (7/8) Epoch 26, batch 2400, train_loss[loss=2.833, NarTop10Accuracy=0.7601, over 5280.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7115, over 5780.95 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:19:45,651 INFO [trainer.py:765] (7/8) Epoch 26, batch 2500, train_loss[loss=2.879, NarTop10Accuracy=0.749, over 5034.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7154, over 5479.68 frames. ], batch size: 7, lr: 3.26e-03 +2024-08-06 20:20:05,863 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 20:21:04,874 INFO [trainer.py:765] (7/8) Epoch 27, batch 100, train_loss[loss=3.243, NarTop10Accuracy=0.67, over 7092.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7103, over 2366.56 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 20:21:39,784 INFO [trainer.py:765] (7/8) Epoch 27, batch 200, train_loss[loss=2.806, NarTop10Accuracy=0.7521, over 6813.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7085, over 3862.14 frames. ], batch size: 17, lr: 3.19e-03 +2024-08-06 20:22:13,050 INFO [trainer.py:765] (7/8) Epoch 27, batch 300, train_loss[loss=2.867, NarTop10Accuracy=0.7615, over 7293.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7094, over 4665.00 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 20:22:43,557 INFO [trainer.py:765] (7/8) Epoch 27, batch 400, train_loss[loss=2.893, NarTop10Accuracy=0.7442, over 5226.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7118, over 5104.69 frames. 
], batch size: 7, lr: 3.18e-03 +2024-08-06 20:23:18,084 INFO [trainer.py:765] (7/8) Epoch 27, batch 500, train_loss[loss=2.895, NarTop10Accuracy=0.743, over 6036.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5377.99 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 20:23:51,435 INFO [trainer.py:765] (7/8) Epoch 27, batch 600, train_loss[loss=3.253, NarTop10Accuracy=0.6661, over 5730.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7151, over 5655.43 frames. ], batch size: 9, lr: 3.18e-03 +2024-08-06 20:24:24,976 INFO [trainer.py:765] (7/8) Epoch 27, batch 700, train_loss[loss=2.871, NarTop10Accuracy=0.7563, over 5064.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7161, over 5724.78 frames. ], batch size: 6, lr: 3.18e-03 +2024-08-06 20:25:03,407 INFO [trainer.py:765] (7/8) Epoch 27, batch 800, train_loss[loss=3.123, NarTop10Accuracy=0.6957, over 4359.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7106, over 5778.41 frames. ], batch size: 5, lr: 3.17e-03 +2024-08-06 20:25:34,176 INFO [trainer.py:765] (7/8) Epoch 27, batch 900, train_loss[loss=3.309, NarTop10Accuracy=0.6715, over 6303.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7096, over 5778.17 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:10,097 INFO [trainer.py:765] (7/8) Epoch 27, batch 1000, train_loss[loss=2.844, NarTop10Accuracy=0.7711, over 6168.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7101, over 5884.82 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 20:26:18,316 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:26:26,346 INFO [trainer.py:811] (7/8) Epoch 27, validation: loss=2.95, NarTop10Accuracy=0.735, over 1905321.00 frames. +2024-08-06 20:26:26,347 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:26:26,877 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.166e+02 2.331e+02 2.512e+02 4.284e+02, threshold=4.663e+02, percent-clipped=0.0 +2024-08-06 20:26:50,899 INFO [trainer.py:765] (7/8) Epoch 27, batch 1100, train_loss[loss=2.96, NarTop10Accuracy=0.7288, over 6771.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7098, over 5923.61 frames. ], batch size: 17, lr: 3.17e-03 +2024-08-06 20:27:24,545 INFO [trainer.py:765] (7/8) Epoch 27, batch 1200, train_loss[loss=2.899, NarTop10Accuracy=0.7445, over 7227.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7107, over 5927.32 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 20:27:58,568 INFO [trainer.py:765] (7/8) Epoch 27, batch 1300, train_loss[loss=2.658, NarTop10Accuracy=0.7978, over 5094.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7128, over 6000.61 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 20:28:36,745 INFO [trainer.py:765] (7/8) Epoch 27, batch 1400, train_loss[loss=3.372, NarTop10Accuracy=0.6422, over 5994.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.709, over 6028.94 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 20:29:04,632 INFO [trainer.py:765] (7/8) Epoch 27, batch 1500, train_loss[loss=3.026, NarTop10Accuracy=0.7293, over 6096.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7108, over 5964.81 frames. ], batch size: 50, lr: 3.16e-03 +2024-08-06 20:29:32,362 INFO [trainer.py:765] (7/8) Epoch 27, batch 1600, train_loss[loss=2.94, NarTop10Accuracy=0.7431, over 7050.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7085, over 5932.01 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 20:29:58,977 INFO [trainer.py:765] (7/8) Epoch 27, batch 1700, train_loss[loss=3.249, NarTop10Accuracy=0.6826, over 6735.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7089, over 5938.63 frames. ], batch size: 14, lr: 3.15e-03 +2024-08-06 20:30:25,463 INFO [trainer.py:765] (7/8) Epoch 27, batch 1800, train_loss[loss=3.312, NarTop10Accuracy=0.66, over 6840.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7083, over 6004.53 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 20:30:51,845 INFO [trainer.py:765] (7/8) Epoch 27, batch 1900, train_loss[loss=3.087, NarTop10Accuracy=0.7068, over 5781.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.709, over 6047.62 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 20:31:17,390 INFO [trainer.py:765] (7/8) Epoch 27, batch 2000, train_loss[loss=3.118, NarTop10Accuracy=0.6907, over 6225.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 6017.32 frames. ], batch size: 51, lr: 3.15e-03 +2024-08-06 20:31:42,660 INFO [trainer.py:765] (7/8) Epoch 27, batch 2100, train_loss[loss=2.956, NarTop10Accuracy=0.7369, over 3903.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5980.60 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 20:32:07,804 INFO [trainer.py:765] (7/8) Epoch 27, batch 2200, train_loss[loss=3.444, NarTop10Accuracy=0.6381, over 7164.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7083, over 6016.87 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 20:32:32,941 INFO [trainer.py:765] (7/8) Epoch 27, batch 2300, train_loss[loss=2.899, NarTop10Accuracy=0.746, over 5706.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7071, over 6031.97 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 20:32:57,246 INFO [trainer.py:765] (7/8) Epoch 27, batch 2400, train_loss[loss=2.788, NarTop10Accuracy=0.7758, over 5130.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7065, over 5794.32 frames. ], batch size: 7, lr: 3.14e-03 +2024-08-06 20:33:20,615 INFO [trainer.py:765] (7/8) Epoch 27, batch 2500, train_loss[loss=3.374, NarTop10Accuracy=0.6459, over 5112.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.713, over 5497.56 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 20:33:40,790 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 20:34:35,829 INFO [trainer.py:765] (7/8) Epoch 28, batch 100, train_loss[loss=2.88, NarTop10Accuracy=0.7489, over 7314.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7113, over 2366.91 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 20:35:07,394 INFO [trainer.py:765] (7/8) Epoch 28, batch 200, train_loss[loss=2.78, NarTop10Accuracy=0.7757, over 6759.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 3853.79 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 20:35:45,423 INFO [trainer.py:765] (7/8) Epoch 28, batch 300, train_loss[loss=3.082, NarTop10Accuracy=0.7171, over 7125.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7115, over 4659.01 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 20:36:15,865 INFO [trainer.py:765] (7/8) Epoch 28, batch 400, train_loss[loss=3.155, NarTop10Accuracy=0.6832, over 5259.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7091, over 5095.94 frames. ], batch size: 7, lr: 3.07e-03 +2024-08-06 20:36:32,407 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:36:40,530 INFO [trainer.py:811] (7/8) Epoch 28, validation: loss=2.963, NarTop10Accuracy=0.7327, over 1905321.00 frames. 
+2024-08-06 20:36:40,531 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:36:41,103 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.179e+02 2.348e+02 2.536e+02 3.573e+02, threshold=4.696e+02, percent-clipped=0.0 +2024-08-06 20:36:56,664 INFO [trainer.py:765] (7/8) Epoch 28, batch 500, train_loss[loss=3.365, NarTop10Accuracy=0.6561, over 6138.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7115, over 5375.21 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 20:37:29,462 INFO [trainer.py:765] (7/8) Epoch 28, batch 600, train_loss[loss=3.061, NarTop10Accuracy=0.7149, over 5712.00 frames. ], tot_loss[loss=3.078, NarTop10Accuracy=0.7098, over 5642.95 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 20:38:08,891 INFO [trainer.py:765] (7/8) Epoch 28, batch 700, train_loss[loss=3.059, NarTop10Accuracy=0.6993, over 5145.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7086, over 5694.31 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:38:42,489 INFO [trainer.py:765] (7/8) Epoch 28, batch 800, train_loss[loss=2.884, NarTop10Accuracy=0.7404, over 5091.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7134, over 5760.11 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 20:39:15,506 INFO [trainer.py:765] (7/8) Epoch 28, batch 900, train_loss[loss=3.291, NarTop10Accuracy=0.6702, over 6261.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7132, over 5799.41 frames. ], batch size: 13, lr: 3.06e-03 +2024-08-06 20:39:53,240 INFO [trainer.py:765] (7/8) Epoch 28, batch 1000, train_loss[loss=3.376, NarTop10Accuracy=0.6557, over 6183.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7128, over 5906.35 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 20:40:25,867 INFO [trainer.py:765] (7/8) Epoch 28, batch 1100, train_loss[loss=2.797, NarTop10Accuracy=0.7671, over 6807.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7093, over 5945.76 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 20:40:59,418 INFO [trainer.py:765] (7/8) Epoch 28, batch 1200, train_loss[loss=3.349, NarTop10Accuracy=0.6634, over 7227.00 frames. ], tot_loss[loss=3.088, NarTop10Accuracy=0.7076, over 5938.23 frames. ], batch size: 32, lr: 3.05e-03 +2024-08-06 20:41:38,681 INFO [trainer.py:765] (7/8) Epoch 28, batch 1300, train_loss[loss=3.232, NarTop10Accuracy=0.6766, over 4965.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7086, over 5982.52 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 20:42:13,047 INFO [trainer.py:765] (7/8) Epoch 28, batch 1400, train_loss[loss=2.946, NarTop10Accuracy=0.7387, over 6204.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7076, over 5996.93 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 20:42:43,171 INFO [trainer.py:765] (7/8) Epoch 28, batch 1500, train_loss[loss=3.449, NarTop10Accuracy=0.6344, over 6579.00 frames. ], tot_loss[loss=3.075, NarTop10Accuracy=0.7106, over 5946.56 frames. ], batch size: 50, lr: 3.04e-03 +2024-08-06 20:43:11,080 INFO [trainer.py:765] (7/8) Epoch 28, batch 1600, train_loss[loss=2.791, NarTop10Accuracy=0.774, over 7377.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7109, over 5918.59 frames. ], batch size: 23, lr: 3.04e-03 +2024-08-06 20:43:37,785 INFO [trainer.py:765] (7/8) Epoch 28, batch 1700, train_loss[loss=2.994, NarTop10Accuracy=0.7218, over 6234.00 frames. ], tot_loss[loss=3.085, NarTop10Accuracy=0.7089, over 5907.25 frames. 
], batch size: 13, lr: 3.04e-03 +2024-08-06 20:44:04,326 INFO [trainer.py:765] (7/8) Epoch 28, batch 1800, train_loss[loss=3.137, NarTop10Accuracy=0.7019, over 7071.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.7087, over 5975.20 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 20:44:30,757 INFO [trainer.py:765] (7/8) Epoch 28, batch 1900, train_loss[loss=3.075, NarTop10Accuracy=0.7103, over 6156.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7097, over 6014.10 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:44:56,328 INFO [trainer.py:765] (7/8) Epoch 28, batch 2000, train_loss[loss=3.018, NarTop10Accuracy=0.725, over 6045.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7134, over 5995.78 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 20:45:21,651 INFO [trainer.py:765] (7/8) Epoch 28, batch 2100, train_loss[loss=2.749, NarTop10Accuracy=0.7711, over 3936.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7142, over 5963.26 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 20:45:47,076 INFO [trainer.py:765] (7/8) Epoch 28, batch 2200, train_loss[loss=2.882, NarTop10Accuracy=0.7484, over 7140.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7115, over 5996.77 frames. ], batch size: 31, lr: 3.03e-03 +2024-08-06 20:46:12,307 INFO [trainer.py:765] (7/8) Epoch 28, batch 2300, train_loss[loss=3.314, NarTop10Accuracy=0.661, over 5727.00 frames. ], tot_loss[loss=3.097, NarTop10Accuracy=0.7065, over 6027.52 frames. ], batch size: 9, lr: 3.03e-03 +2024-08-06 20:46:36,806 INFO [trainer.py:765] (7/8) Epoch 28, batch 2400, train_loss[loss=2.956, NarTop10Accuracy=0.7298, over 5073.00 frames. ], tot_loss[loss=3.091, NarTop10Accuracy=0.707, over 5787.43 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:46:48,595 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:46:56,604 INFO [trainer.py:811] (7/8) Epoch 28, validation: loss=2.931, NarTop10Accuracy=0.7396, over 1905321.00 frames. +2024-08-06 20:46:56,605 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:46:57,082 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.201e+02 2.381e+02 2.551e+02 4.872e+02, threshold=4.762e+02, percent-clipped=0.1 +2024-08-06 20:47:08,293 INFO [trainer.py:765] (7/8) Epoch 28, batch 2500, train_loss[loss=3.042, NarTop10Accuracy=0.72, over 5028.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.7119, over 5482.48 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 20:47:28,122 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 20:48:21,053 INFO [trainer.py:765] (7/8) Epoch 29, batch 100, train_loss[loss=3.041, NarTop10Accuracy=0.7181, over 7344.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7122, over 2377.54 frames. ], batch size: 32, lr: 2.96e-03 +2024-08-06 20:48:53,407 INFO [trainer.py:765] (7/8) Epoch 29, batch 200, train_loss[loss=3.285, NarTop10Accuracy=0.662, over 6834.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7173, over 3861.31 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 20:49:27,477 INFO [trainer.py:765] (7/8) Epoch 29, batch 300, train_loss[loss=3.213, NarTop10Accuracy=0.6787, over 7116.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7173, over 4668.16 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 20:49:56,054 INFO [trainer.py:765] (7/8) Epoch 29, batch 400, train_loss[loss=3.333, NarTop10Accuracy=0.6693, over 5016.00 frames. ], tot_loss[loss=3.069, NarTop10Accuracy=0.7122, over 5108.20 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 20:50:29,436 INFO [trainer.py:765] (7/8) Epoch 29, batch 500, train_loss[loss=3.292, NarTop10Accuracy=0.6678, over 6126.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7143, over 5402.00 frames. ], batch size: 11, lr: 2.96e-03 +2024-08-06 20:51:00,025 INFO [trainer.py:765] (7/8) Epoch 29, batch 600, train_loss[loss=2.795, NarTop10Accuracy=0.765, over 5580.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7147, over 5655.41 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 20:51:35,678 INFO [trainer.py:765] (7/8) Epoch 29, batch 700, train_loss[loss=2.793, NarTop10Accuracy=0.7565, over 4431.00 frames. ], tot_loss[loss=3.079, NarTop10Accuracy=0.7099, over 5725.48 frames. ], batch size: 5, lr: 2.95e-03 +2024-08-06 20:52:10,725 INFO [trainer.py:765] (7/8) Epoch 29, batch 800, train_loss[loss=2.663, NarTop10Accuracy=0.7841, over 5115.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7106, over 5789.50 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 20:52:40,743 INFO [trainer.py:765] (7/8) Epoch 29, batch 900, train_loss[loss=2.762, NarTop10Accuracy=0.7841, over 6297.00 frames. ], tot_loss[loss=3.077, NarTop10Accuracy=0.7098, over 5795.48 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:16,862 INFO [trainer.py:765] (7/8) Epoch 29, batch 1000, train_loss[loss=3.384, NarTop10Accuracy=0.6505, over 6363.00 frames. ], tot_loss[loss=3.086, NarTop10Accuracy=0.708, over 5894.01 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 20:53:52,903 INFO [trainer.py:765] (7/8) Epoch 29, batch 1100, train_loss[loss=3.194, NarTop10Accuracy=0.6828, over 6684.00 frames. ], tot_loss[loss=3.089, NarTop10Accuracy=0.7074, over 5925.55 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 20:54:23,691 INFO [trainer.py:765] (7/8) Epoch 29, batch 1200, train_loss[loss=3.186, NarTop10Accuracy=0.6888, over 7317.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7087, over 5927.39 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 20:55:01,429 INFO [trainer.py:765] (7/8) Epoch 29, batch 1300, train_loss[loss=2.852, NarTop10Accuracy=0.7586, over 4443.00 frames. ], tot_loss[loss=3.08, NarTop10Accuracy=0.7091, over 5996.31 frames. ], batch size: 5, lr: 2.94e-03 +2024-08-06 20:55:32,558 INFO [trainer.py:765] (7/8) Epoch 29, batch 1400, train_loss[loss=3.457, NarTop10Accuracy=0.632, over 6048.00 frames. ], tot_loss[loss=3.084, NarTop10Accuracy=0.7087, over 6012.33 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 20:56:04,360 INFO [trainer.py:765] (7/8) Epoch 29, batch 1500, train_loss[loss=3.399, NarTop10Accuracy=0.6443, over 6351.00 frames. ], tot_loss[loss=3.082, NarTop10Accuracy=0.7093, over 5940.89 frames. ], batch size: 51, lr: 2.94e-03 +2024-08-06 20:56:32,041 INFO [trainer.py:765] (7/8) Epoch 29, batch 1600, train_loss[loss=3.29, NarTop10Accuracy=0.6614, over 6966.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7081, over 5920.69 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 20:56:58,640 INFO [trainer.py:765] (7/8) Epoch 29, batch 1700, train_loss[loss=2.814, NarTop10Accuracy=0.758, over 6243.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7112, over 5901.66 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 20:57:25,001 INFO [trainer.py:765] (7/8) Epoch 29, batch 1800, train_loss[loss=3.087, NarTop10Accuracy=0.7163, over 7227.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7121, over 5962.67 frames. 
], batch size: 22, lr: 2.93e-03 +2024-08-06 20:57:44,622 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 20:57:52,863 INFO [trainer.py:811] (7/8) Epoch 29, validation: loss=2.897, NarTop10Accuracy=0.7458, over 1905321.00 frames. +2024-08-06 20:57:52,864 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 20:57:53,424 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.206e+02 2.380e+02 2.554e+02 4.464e+02, threshold=4.759e+02, percent-clipped=0.0 +2024-08-06 20:57:59,756 INFO [trainer.py:765] (7/8) Epoch 29, batch 1900, train_loss[loss=3.022, NarTop10Accuracy=0.7189, over 6447.00 frames. ], tot_loss[loss=3.087, NarTop10Accuracy=0.7083, over 6020.71 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:25,309 INFO [trainer.py:765] (7/8) Epoch 29, batch 2000, train_loss[loss=3.574, NarTop10Accuracy=0.6094, over 5865.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 5999.10 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 20:58:50,630 INFO [trainer.py:765] (7/8) Epoch 29, batch 2100, train_loss[loss=2.958, NarTop10Accuracy=0.7304, over 4890.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7089, over 5971.44 frames. ], batch size: 5, lr: 2.92e-03 +2024-08-06 20:59:15,806 INFO [trainer.py:765] (7/8) Epoch 29, batch 2200, train_loss[loss=2.849, NarTop10Accuracy=0.7552, over 7215.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7115, over 6004.23 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 20:59:40,911 INFO [trainer.py:765] (7/8) Epoch 29, batch 2300, train_loss[loss=2.766, NarTop10Accuracy=0.7794, over 5556.00 frames. ], tot_loss[loss=3.093, NarTop10Accuracy=0.7071, over 6025.68 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 21:00:05,156 INFO [trainer.py:765] (7/8) Epoch 29, batch 2400, train_loss[loss=2.829, NarTop10Accuracy=0.7663, over 4989.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7106, over 5773.17 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:28,742 INFO [trainer.py:765] (7/8) Epoch 29, batch 2500, train_loss[loss=3.272, NarTop10Accuracy=0.6728, over 5163.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.716, over 5475.70 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 21:00:48,776 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 21:01:41,717 INFO [trainer.py:765] (7/8) Epoch 30, batch 100, train_loss[loss=2.94, NarTop10Accuracy=0.7383, over 7056.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7194, over 2364.30 frames. ], batch size: 31, lr: 2.86e-03 +2024-08-06 21:02:17,014 INFO [trainer.py:765] (7/8) Epoch 30, batch 200, train_loss[loss=2.857, NarTop10Accuracy=0.7508, over 6858.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7225, over 3856.52 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 21:02:51,344 INFO [trainer.py:765] (7/8) Epoch 30, batch 300, train_loss[loss=2.834, NarTop10Accuracy=0.7568, over 7296.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.724, over 4650.80 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 21:03:21,644 INFO [trainer.py:765] (7/8) Epoch 30, batch 400, train_loss[loss=2.862, NarTop10Accuracy=0.7637, over 5181.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7203, over 5113.08 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 21:03:58,546 INFO [trainer.py:765] (7/8) Epoch 30, batch 500, train_loss[loss=3.444, NarTop10Accuracy=0.6284, over 6006.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7183, over 5389.86 frames. 
], batch size: 11, lr: 2.86e-03 +2024-08-06 21:04:31,656 INFO [trainer.py:765] (7/8) Epoch 30, batch 600, train_loss[loss=2.978, NarTop10Accuracy=0.7333, over 5781.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7164, over 5655.17 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 21:05:03,526 INFO [trainer.py:765] (7/8) Epoch 30, batch 700, train_loss[loss=2.914, NarTop10Accuracy=0.7379, over 4212.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7192, over 5743.42 frames. ], batch size: 5, lr: 2.85e-03 +2024-08-06 21:05:44,132 INFO [trainer.py:765] (7/8) Epoch 30, batch 800, train_loss[loss=2.859, NarTop10Accuracy=0.7458, over 5229.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7194, over 5783.65 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 21:06:14,844 INFO [trainer.py:765] (7/8) Epoch 30, batch 900, train_loss[loss=2.836, NarTop10Accuracy=0.756, over 6705.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7196, over 5785.31 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 21:06:48,952 INFO [trainer.py:765] (7/8) Epoch 30, batch 1000, train_loss[loss=2.888, NarTop10Accuracy=0.7414, over 6285.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7123, over 5892.18 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 21:07:25,937 INFO [trainer.py:765] (7/8) Epoch 30, batch 1100, train_loss[loss=3.405, NarTop10Accuracy=0.6433, over 7098.00 frames. ], tot_loss[loss=3.083, NarTop10Accuracy=0.7083, over 5918.31 frames. ], batch size: 18, lr: 2.84e-03 +2024-08-06 21:08:02,381 INFO [trainer.py:765] (7/8) Epoch 30, batch 1200, train_loss[loss=2.867, NarTop10Accuracy=0.7548, over 7164.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7112, over 5911.28 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 21:08:35,371 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 21:08:43,457 INFO [trainer.py:811] (7/8) Epoch 30, validation: loss=2.93, NarTop10Accuracy=0.7391, over 1905321.00 frames. +2024-08-06 21:08:43,457 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 21:08:44,197 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.209e+02 2.377e+02 2.553e+02 3.956e+02, threshold=4.754e+02, percent-clipped=0.0 +2024-08-06 21:08:44,203 INFO [trainer.py:765] (7/8) Epoch 30, batch 1300, train_loss[loss=3.155, NarTop10Accuracy=0.6844, over 5064.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7112, over 5979.96 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 21:09:22,397 INFO [trainer.py:765] (7/8) Epoch 30, batch 1400, train_loss[loss=2.922, NarTop10Accuracy=0.7431, over 6027.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7116, over 5990.32 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 21:09:52,373 INFO [trainer.py:765] (7/8) Epoch 30, batch 1500, train_loss[loss=2.991, NarTop10Accuracy=0.7245, over 6303.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7129, over 5955.86 frames. ], batch size: 50, lr: 2.84e-03 +2024-08-06 21:10:20,083 INFO [trainer.py:765] (7/8) Epoch 30, batch 1600, train_loss[loss=3.066, NarTop10Accuracy=0.7143, over 7062.00 frames. ], tot_loss[loss=3.066, NarTop10Accuracy=0.7123, over 5941.59 frames. ], batch size: 22, lr: 2.84e-03 +2024-08-06 21:10:46,679 INFO [trainer.py:765] (7/8) Epoch 30, batch 1700, train_loss[loss=3.123, NarTop10Accuracy=0.6982, over 6252.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7109, over 5928.77 frames. 
], batch size: 13, lr: 2.83e-03 +2024-08-06 21:11:13,059 INFO [trainer.py:765] (7/8) Epoch 30, batch 1800, train_loss[loss=3.279, NarTop10Accuracy=0.663, over 6921.00 frames. ], tot_loss[loss=3.071, NarTop10Accuracy=0.7111, over 5981.22 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 21:11:39,418 INFO [trainer.py:765] (7/8) Epoch 30, batch 1900, train_loss[loss=3.087, NarTop10Accuracy=0.7129, over 6516.00 frames. ], tot_loss[loss=3.076, NarTop10Accuracy=0.7105, over 6023.37 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:04,826 INFO [trainer.py:765] (7/8) Epoch 30, batch 2000, train_loss[loss=3.382, NarTop10Accuracy=0.652, over 6810.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7112, over 6013.17 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 21:12:30,088 INFO [trainer.py:765] (7/8) Epoch 30, batch 2100, train_loss[loss=2.92, NarTop10Accuracy=0.7454, over 4809.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.711, over 5988.19 frames. ], batch size: 5, lr: 2.83e-03 +2024-08-06 21:12:55,225 INFO [trainer.py:765] (7/8) Epoch 30, batch 2200, train_loss[loss=2.914, NarTop10Accuracy=0.7482, over 7386.00 frames. ], tot_loss[loss=3.07, NarTop10Accuracy=0.7116, over 6007.69 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 21:13:20,297 INFO [trainer.py:765] (7/8) Epoch 30, batch 2300, train_loss[loss=2.706, NarTop10Accuracy=0.7924, over 5745.00 frames. ], tot_loss[loss=3.094, NarTop10Accuracy=0.7068, over 6019.48 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 21:13:44,491 INFO [trainer.py:765] (7/8) Epoch 30, batch 2400, train_loss[loss=2.603, NarTop10Accuracy=0.8025, over 5049.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7136, over 5782.69 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:07,987 INFO [trainer.py:765] (7/8) Epoch 30, batch 2500, train_loss[loss=2.966, NarTop10Accuracy=0.7364, over 5205.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.716, over 5503.50 frames. ], batch size: 7, lr: 2.82e-03 +2024-08-06 21:14:27,936 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 21:15:23,633 INFO [trainer.py:765] (7/8) Epoch 31, batch 100, train_loss[loss=3.395, NarTop10Accuracy=0.6508, over 7014.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7115, over 2369.12 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 21:15:55,127 INFO [trainer.py:765] (7/8) Epoch 31, batch 200, train_loss[loss=2.821, NarTop10Accuracy=0.7579, over 6768.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7178, over 3867.95 frames. ], batch size: 17, lr: 2.77e-03 +2024-08-06 21:16:31,216 INFO [trainer.py:765] (7/8) Epoch 31, batch 300, train_loss[loss=2.886, NarTop10Accuracy=0.7451, over 7101.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7173, over 4657.99 frames. ], batch size: 22, lr: 2.77e-03 +2024-08-06 21:17:01,625 INFO [trainer.py:765] (7/8) Epoch 31, batch 400, train_loss[loss=3.131, NarTop10Accuracy=0.6947, over 5214.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.715, over 5115.65 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 21:17:35,725 INFO [trainer.py:765] (7/8) Epoch 31, batch 500, train_loss[loss=2.647, NarTop10Accuracy=0.7946, over 6186.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7169, over 5398.09 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 21:18:07,084 INFO [trainer.py:765] (7/8) Epoch 31, batch 600, train_loss[loss=2.767, NarTop10Accuracy=0.7797, over 5742.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7141, over 5652.23 frames. 
], batch size: 9, lr: 2.76e-03 +2024-08-06 21:18:44,610 INFO [trainer.py:765] (7/8) Epoch 31, batch 700, train_loss[loss=3.367, NarTop10Accuracy=0.6503, over 5145.00 frames. ], tot_loss[loss=3.067, NarTop10Accuracy=0.712, over 5726.00 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:18:51,095 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 21:18:59,276 INFO [trainer.py:811] (7/8) Epoch 31, validation: loss=2.984, NarTop10Accuracy=0.7279, over 1905321.00 frames. +2024-08-06 21:18:59,276 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 21:18:59,986 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.222e+02 2.378e+02 2.557e+02 4.306e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-08-06 21:19:24,246 INFO [trainer.py:765] (7/8) Epoch 31, batch 800, train_loss[loss=2.812, NarTop10Accuracy=0.7691, over 5061.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5784.80 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 21:19:56,950 INFO [trainer.py:765] (7/8) Epoch 31, batch 900, train_loss[loss=3.412, NarTop10Accuracy=0.6443, over 6147.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7158, over 5809.12 frames. ], batch size: 13, lr: 2.76e-03 +2024-08-06 21:20:33,311 INFO [trainer.py:765] (7/8) Epoch 31, batch 1000, train_loss[loss=3.241, NarTop10Accuracy=0.676, over 6732.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.717, over 5911.48 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 21:21:10,215 INFO [trainer.py:765] (7/8) Epoch 31, batch 1100, train_loss[loss=3.275, NarTop10Accuracy=0.6645, over 6888.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7157, over 5926.71 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 21:21:41,119 INFO [trainer.py:765] (7/8) Epoch 31, batch 1200, train_loss[loss=3.042, NarTop10Accuracy=0.7137, over 7098.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7195, over 5925.63 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 21:22:19,741 INFO [trainer.py:765] (7/8) Epoch 31, batch 1300, train_loss[loss=2.909, NarTop10Accuracy=0.7501, over 5052.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5997.14 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 21:22:53,534 INFO [trainer.py:765] (7/8) Epoch 31, batch 1400, train_loss[loss=2.864, NarTop10Accuracy=0.7469, over 6105.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7126, over 6012.40 frames. ], batch size: 11, lr: 2.75e-03 +2024-08-06 21:23:21,269 INFO [trainer.py:765] (7/8) Epoch 31, batch 1500, train_loss[loss=3.361, NarTop10Accuracy=0.6566, over 6351.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7132, over 5943.46 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:23:49,005 INFO [trainer.py:765] (7/8) Epoch 31, batch 1600, train_loss[loss=3.261, NarTop10Accuracy=0.6676, over 7056.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7133, over 5922.80 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 21:24:15,512 INFO [trainer.py:765] (7/8) Epoch 31, batch 1700, train_loss[loss=3.277, NarTop10Accuracy=0.6694, over 6195.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.7135, over 5895.52 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 21:24:41,996 INFO [trainer.py:765] (7/8) Epoch 31, batch 1800, train_loss[loss=2.851, NarTop10Accuracy=0.7572, over 7140.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7149, over 5969.53 frames. 
], batch size: 22, lr: 2.74e-03 +2024-08-06 21:25:08,357 INFO [trainer.py:765] (7/8) Epoch 31, batch 1900, train_loss[loss=3.166, NarTop10Accuracy=0.6898, over 6372.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7134, over 6011.33 frames. ], batch size: 51, lr: 2.74e-03 +2024-08-06 21:25:33,773 INFO [trainer.py:765] (7/8) Epoch 31, batch 2000, train_loss[loss=3.059, NarTop10Accuracy=0.7199, over 5613.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7147, over 5997.85 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 21:25:59,106 INFO [trainer.py:765] (7/8) Epoch 31, batch 2100, train_loss[loss=2.726, NarTop10Accuracy=0.7792, over 3903.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7157, over 5981.71 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 21:26:24,238 INFO [trainer.py:765] (7/8) Epoch 31, batch 2200, train_loss[loss=2.942, NarTop10Accuracy=0.7359, over 7050.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7178, over 6022.25 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 21:26:49,322 INFO [trainer.py:765] (7/8) Epoch 31, batch 2300, train_loss[loss=2.785, NarTop10Accuracy=0.7659, over 5628.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7139, over 6026.59 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 21:27:13,608 INFO [trainer.py:765] (7/8) Epoch 31, batch 2400, train_loss[loss=2.835, NarTop10Accuracy=0.7507, over 5097.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5784.11 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:37,027 INFO [trainer.py:765] (7/8) Epoch 31, batch 2500, train_loss[loss=2.995, NarTop10Accuracy=0.7351, over 5082.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 5491.06 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 21:27:57,305 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 21:28:49,393 INFO [trainer.py:765] (7/8) Epoch 32, batch 100, train_loss[loss=2.926, NarTop10Accuracy=0.7467, over 7299.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7131, over 2373.50 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 21:29:08,161 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 21:29:16,392 INFO [trainer.py:811] (7/8) Epoch 32, validation: loss=2.919, NarTop10Accuracy=0.7409, over 1905321.00 frames. +2024-08-06 21:29:16,393 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 21:29:16,939 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.253e+02 2.413e+02 2.600e+02 5.680e+02, threshold=4.826e+02, percent-clipped=0.1 +2024-08-06 21:29:32,273 INFO [trainer.py:765] (7/8) Epoch 32, batch 200, train_loss[loss=3.227, NarTop10Accuracy=0.6725, over 6948.00 frames. ], tot_loss[loss=3.068, NarTop10Accuracy=0.7117, over 3867.85 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 21:30:05,278 INFO [trainer.py:765] (7/8) Epoch 32, batch 300, train_loss[loss=3.029, NarTop10Accuracy=0.7196, over 7050.00 frames. ], tot_loss[loss=3.048, NarTop10Accuracy=0.7161, over 4665.13 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 21:30:34,103 INFO [trainer.py:765] (7/8) Epoch 32, batch 400, train_loss[loss=2.789, NarTop10Accuracy=0.7694, over 5100.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7129, over 5118.66 frames. ], batch size: 7, lr: 2.68e-03 +2024-08-06 21:31:13,530 INFO [trainer.py:765] (7/8) Epoch 32, batch 500, train_loss[loss=2.953, NarTop10Accuracy=0.7297, over 6030.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7155, over 5390.55 frames. 
], batch size: 11, lr: 2.67e-03 +2024-08-06 21:31:42,486 INFO [trainer.py:765] (7/8) Epoch 32, batch 600, train_loss[loss=3.203, NarTop10Accuracy=0.6876, over 5901.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5667.18 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 21:32:17,029 INFO [trainer.py:765] (7/8) Epoch 32, batch 700, train_loss[loss=2.717, NarTop10Accuracy=0.7833, over 4995.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 5739.24 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 21:33:00,646 INFO [trainer.py:765] (7/8) Epoch 32, batch 800, train_loss[loss=3.546, NarTop10Accuracy=0.6217, over 4170.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7173, over 5777.54 frames. ], batch size: 5, lr: 2.67e-03 +2024-08-06 21:33:28,991 INFO [trainer.py:765] (7/8) Epoch 32, batch 900, train_loss[loss=2.82, NarTop10Accuracy=0.7615, over 6198.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7191, over 5803.29 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 21:34:04,049 INFO [trainer.py:765] (7/8) Epoch 32, batch 1000, train_loss[loss=3.261, NarTop10Accuracy=0.6762, over 6591.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7159, over 5903.71 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 21:34:46,674 INFO [trainer.py:765] (7/8) Epoch 32, batch 1100, train_loss[loss=3.222, NarTop10Accuracy=0.6764, over 6882.00 frames. ], tot_loss[loss=3.052, NarTop10Accuracy=0.7147, over 5930.84 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 21:35:18,172 INFO [trainer.py:765] (7/8) Epoch 32, batch 1200, train_loss[loss=3.248, NarTop10Accuracy=0.6764, over 7104.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 5938.04 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 21:35:52,801 INFO [trainer.py:765] (7/8) Epoch 32, batch 1300, train_loss[loss=3.343, NarTop10Accuracy=0.651, over 5046.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7123, over 6001.77 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 21:36:29,478 INFO [trainer.py:765] (7/8) Epoch 32, batch 1400, train_loss[loss=3.386, NarTop10Accuracy=0.6446, over 6054.00 frames. ], tot_loss[loss=3.065, NarTop10Accuracy=0.7121, over 6004.18 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 21:37:04,733 INFO [trainer.py:765] (7/8) Epoch 32, batch 1500, train_loss[loss=3.465, NarTop10Accuracy=0.633, over 5976.00 frames. ], tot_loss[loss=3.06, NarTop10Accuracy=0.713, over 5945.07 frames. ], batch size: 51, lr: 2.66e-03 +2024-08-06 21:37:32,521 INFO [trainer.py:765] (7/8) Epoch 32, batch 1600, train_loss[loss=3.042, NarTop10Accuracy=0.7117, over 6975.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7142, over 5917.70 frames. ], batch size: 22, lr: 2.66e-03 +2024-08-06 21:37:59,160 INFO [trainer.py:765] (7/8) Epoch 32, batch 1700, train_loss[loss=3.096, NarTop10Accuracy=0.7082, over 6696.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7145, over 5912.66 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 21:38:25,703 INFO [trainer.py:765] (7/8) Epoch 32, batch 1800, train_loss[loss=3.081, NarTop10Accuracy=0.7178, over 7125.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 5980.91 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 21:38:52,170 INFO [trainer.py:765] (7/8) Epoch 32, batch 1900, train_loss[loss=3.075, NarTop10Accuracy=0.6994, over 5730.00 frames. ], tot_loss[loss=3.072, NarTop10Accuracy=0.7106, over 6019.43 frames. 
], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:17,769 INFO [trainer.py:765] (7/8) Epoch 32, batch 2000, train_loss[loss=3.51, NarTop10Accuracy=0.6253, over 5997.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 5985.46 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 21:39:43,179 INFO [trainer.py:765] (7/8) Epoch 32, batch 2100, train_loss[loss=2.699, NarTop10Accuracy=0.7882, over 4836.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7144, over 5961.46 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 21:39:54,782 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 21:40:02,941 INFO [trainer.py:811] (7/8) Epoch 32, validation: loss=2.886, NarTop10Accuracy=0.7482, over 1905321.00 frames. +2024-08-06 21:40:02,942 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 21:40:03,423 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.278e+02 2.449e+02 2.609e+02 8.207e+02, threshold=4.898e+02, percent-clipped=0.3 +2024-08-06 21:40:16,629 INFO [trainer.py:765] (7/8) Epoch 32, batch 2200, train_loss[loss=3.04, NarTop10Accuracy=0.7169, over 7242.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7142, over 6006.19 frames. ], batch size: 31, lr: 2.65e-03 +2024-08-06 21:40:41,717 INFO [trainer.py:765] (7/8) Epoch 32, batch 2300, train_loss[loss=3.303, NarTop10Accuracy=0.6646, over 5691.00 frames. ], tot_loss[loss=3.081, NarTop10Accuracy=0.7097, over 6009.73 frames. ], batch size: 9, lr: 2.65e-03 +2024-08-06 21:41:06,072 INFO [trainer.py:765] (7/8) Epoch 32, batch 2400, train_loss[loss=3.216, NarTop10Accuracy=0.6758, over 5070.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7134, over 5781.42 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:29,538 INFO [trainer.py:765] (7/8) Epoch 32, batch 2500, train_loss[loss=2.741, NarTop10Accuracy=0.7778, over 5184.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.7207, over 5461.12 frames. ], batch size: 7, lr: 2.64e-03 +2024-08-06 21:41:49,414 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 21:42:47,615 INFO [trainer.py:765] (7/8) Epoch 33, batch 100, train_loss[loss=3.004, NarTop10Accuracy=0.7183, over 7344.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7227, over 2367.45 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 21:43:22,368 INFO [trainer.py:765] (7/8) Epoch 33, batch 200, train_loss[loss=2.833, NarTop10Accuracy=0.7635, over 7017.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7198, over 3849.85 frames. ], batch size: 18, lr: 2.60e-03 +2024-08-06 21:43:56,513 INFO [trainer.py:765] (7/8) Epoch 33, batch 300, train_loss[loss=3.417, NarTop10Accuracy=0.6426, over 6948.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7154, over 4642.62 frames. ], batch size: 22, lr: 2.60e-03 +2024-08-06 21:44:30,316 INFO [trainer.py:765] (7/8) Epoch 33, batch 400, train_loss[loss=2.915, NarTop10Accuracy=0.7426, over 5097.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7167, over 5095.28 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 21:45:02,870 INFO [trainer.py:765] (7/8) Epoch 33, batch 500, train_loss[loss=2.779, NarTop10Accuracy=0.7731, over 5949.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7208, over 5380.06 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 21:45:36,226 INFO [trainer.py:765] (7/8) Epoch 33, batch 600, train_loss[loss=3.48, NarTop10Accuracy=0.6282, over 5736.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7143, over 5653.67 frames. 
], batch size: 9, lr: 2.59e-03 +2024-08-06 21:46:11,317 INFO [trainer.py:765] (7/8) Epoch 33, batch 700, train_loss[loss=2.762, NarTop10Accuracy=0.7782, over 4920.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 5708.30 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:46:46,169 INFO [trainer.py:765] (7/8) Epoch 33, batch 800, train_loss[loss=2.573, NarTop10Accuracy=0.8067, over 4977.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7153, over 5790.89 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 21:47:18,908 INFO [trainer.py:765] (7/8) Epoch 33, batch 900, train_loss[loss=3.28, NarTop10Accuracy=0.669, over 6201.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.713, over 5796.95 frames. ], batch size: 13, lr: 2.59e-03 +2024-08-06 21:47:57,316 INFO [trainer.py:765] (7/8) Epoch 33, batch 1000, train_loss[loss=2.978, NarTop10Accuracy=0.7267, over 6240.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7131, over 5896.52 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 21:48:30,908 INFO [trainer.py:765] (7/8) Epoch 33, batch 1100, train_loss[loss=2.833, NarTop10Accuracy=0.7679, over 6708.00 frames. ], tot_loss[loss=3.074, NarTop10Accuracy=0.7101, over 5939.64 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 21:49:06,660 INFO [trainer.py:765] (7/8) Epoch 33, batch 1200, train_loss[loss=2.911, NarTop10Accuracy=0.7409, over 7311.00 frames. ], tot_loss[loss=3.061, NarTop10Accuracy=0.7125, over 5920.57 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 21:49:42,816 INFO [trainer.py:765] (7/8) Epoch 33, batch 1300, train_loss[loss=2.934, NarTop10Accuracy=0.7397, over 5061.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7133, over 5986.05 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 21:50:17,310 INFO [trainer.py:765] (7/8) Epoch 33, batch 1400, train_loss[loss=3.201, NarTop10Accuracy=0.6814, over 6063.00 frames. ], tot_loss[loss=3.063, NarTop10Accuracy=0.7125, over 6011.37 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 21:50:45,370 INFO [trainer.py:765] (7/8) Epoch 33, batch 1500, train_loss[loss=2.96, NarTop10Accuracy=0.7292, over 5757.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7143, over 5934.06 frames. ], batch size: 50, lr: 2.58e-03 +2024-08-06 21:51:04,607 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 21:51:12,662 INFO [trainer.py:811] (7/8) Epoch 33, validation: loss=2.938, NarTop10Accuracy=0.7372, over 1905321.00 frames. +2024-08-06 21:51:12,662 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 21:51:13,180 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.250e+02 2.409e+02 2.586e+02 3.975e+02, threshold=4.818e+02, percent-clipped=0.0 +2024-08-06 21:51:21,261 INFO [trainer.py:765] (7/8) Epoch 33, batch 1600, train_loss[loss=3.215, NarTop10Accuracy=0.6845, over 6966.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7168, over 5922.62 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 21:51:47,922 INFO [trainer.py:765] (7/8) Epoch 33, batch 1700, train_loss[loss=2.707, NarTop10Accuracy=0.795, over 6186.00 frames. ], tot_loss[loss=3.056, NarTop10Accuracy=0.7138, over 5906.55 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 21:52:14,392 INFO [trainer.py:765] (7/8) Epoch 33, batch 1800, train_loss[loss=2.833, NarTop10Accuracy=0.7599, over 7104.00 frames. ], tot_loss[loss=3.053, NarTop10Accuracy=0.7146, over 5975.83 frames. 
], batch size: 22, lr: 2.57e-03 +2024-08-06 21:52:40,856 INFO [trainer.py:765] (7/8) Epoch 33, batch 1900, train_loss[loss=3.522, NarTop10Accuracy=0.6217, over 6045.00 frames. ], tot_loss[loss=3.073, NarTop10Accuracy=0.7106, over 6021.18 frames. ], batch size: 51, lr: 2.57e-03 +2024-08-06 21:53:06,352 INFO [trainer.py:765] (7/8) Epoch 33, batch 2000, train_loss[loss=3.477, NarTop10Accuracy=0.6294, over 6237.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7169, over 5980.85 frames. ], batch size: 51, lr: 2.57e-03 +2024-08-06 21:53:31,658 INFO [trainer.py:765] (7/8) Epoch 33, batch 2100, train_loss[loss=3.393, NarTop10Accuracy=0.6405, over 4827.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7153, over 5960.59 frames. ], batch size: 5, lr: 2.57e-03 +2024-08-06 21:53:56,890 INFO [trainer.py:765] (7/8) Epoch 33, batch 2200, train_loss[loss=3.423, NarTop10Accuracy=0.6417, over 7323.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7137, over 5995.44 frames. ], batch size: 31, lr: 2.57e-03 +2024-08-06 21:54:21,990 INFO [trainer.py:765] (7/8) Epoch 33, batch 2300, train_loss[loss=2.878, NarTop10Accuracy=0.7543, over 5730.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7143, over 6007.08 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 21:54:46,429 INFO [trainer.py:765] (7/8) Epoch 33, batch 2400, train_loss[loss=2.739, NarTop10Accuracy=0.7745, over 5073.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7185, over 5782.94 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:09,862 INFO [trainer.py:765] (7/8) Epoch 33, batch 2500, train_loss[loss=2.73, NarTop10Accuracy=0.7813, over 5073.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7221, over 5473.95 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 21:55:29,695 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 21:56:24,721 INFO [trainer.py:765] (7/8) Epoch 34, batch 100, train_loss[loss=3.395, NarTop10Accuracy=0.6403, over 7227.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7164, over 2369.93 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 21:56:55,613 INFO [trainer.py:765] (7/8) Epoch 34, batch 200, train_loss[loss=3.112, NarTop10Accuracy=0.7052, over 6777.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7217, over 3875.44 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 21:57:31,777 INFO [trainer.py:765] (7/8) Epoch 34, batch 300, train_loss[loss=2.82, NarTop10Accuracy=0.765, over 7380.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7192, over 4670.75 frames. ], batch size: 23, lr: 2.52e-03 +2024-08-06 21:58:02,724 INFO [trainer.py:765] (7/8) Epoch 34, batch 400, train_loss[loss=3.099, NarTop10Accuracy=0.7112, over 5124.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7234, over 5098.39 frames. ], batch size: 7, lr: 2.52e-03 +2024-08-06 21:58:34,690 INFO [trainer.py:765] (7/8) Epoch 34, batch 500, train_loss[loss=3.198, NarTop10Accuracy=0.6873, over 6201.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7207, over 5377.95 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 21:59:09,616 INFO [trainer.py:765] (7/8) Epoch 34, batch 600, train_loss[loss=2.894, NarTop10Accuracy=0.7427, over 5691.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7191, over 5648.95 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 21:59:46,056 INFO [trainer.py:765] (7/8) Epoch 34, batch 700, train_loss[loss=3.169, NarTop10Accuracy=0.6918, over 5010.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7179, over 5716.17 frames. 
], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:17,575 INFO [trainer.py:765] (7/8) Epoch 34, batch 800, train_loss[loss=2.856, NarTop10Accuracy=0.7543, over 5106.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.72, over 5763.49 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 22:00:49,874 INFO [trainer.py:765] (7/8) Epoch 34, batch 900, train_loss[loss=2.901, NarTop10Accuracy=0.7474, over 6258.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.72, over 5790.58 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:01:25,338 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:01:33,386 INFO [trainer.py:811] (7/8) Epoch 34, validation: loss=2.9, NarTop10Accuracy=0.7444, over 1905321.00 frames. +2024-08-06 22:01:33,387 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:01:34,091 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.259e+02 2.434e+02 2.615e+02 5.125e+02, threshold=4.868e+02, percent-clipped=0.1 +2024-08-06 22:01:35,624 INFO [trainer.py:765] (7/8) Epoch 34, batch 1000, train_loss[loss=3.454, NarTop10Accuracy=0.6373, over 6291.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7171, over 5913.30 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 22:02:10,830 INFO [trainer.py:765] (7/8) Epoch 34, batch 1100, train_loss[loss=3.221, NarTop10Accuracy=0.6749, over 6942.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7153, over 5947.16 frames. ], batch size: 17, lr: 2.51e-03 +2024-08-06 22:02:46,786 INFO [trainer.py:765] (7/8) Epoch 34, batch 1200, train_loss[loss=2.929, NarTop10Accuracy=0.7474, over 7257.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7172, over 5943.83 frames. ], batch size: 31, lr: 2.50e-03 +2024-08-06 22:03:20,814 INFO [trainer.py:765] (7/8) Epoch 34, batch 1300, train_loss[loss=2.66, NarTop10Accuracy=0.7867, over 5007.00 frames. ], tot_loss[loss=3.041, NarTop10Accuracy=0.7175, over 5987.30 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 22:03:52,949 INFO [trainer.py:765] (7/8) Epoch 34, batch 1400, train_loss[loss=3.278, NarTop10Accuracy=0.6647, over 5997.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7178, over 5994.47 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 22:04:20,822 INFO [trainer.py:765] (7/8) Epoch 34, batch 1500, train_loss[loss=3.028, NarTop10Accuracy=0.7168, over 6453.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7177, over 5925.81 frames. ], batch size: 50, lr: 2.50e-03 +2024-08-06 22:04:48,600 INFO [trainer.py:765] (7/8) Epoch 34, batch 1600, train_loss[loss=2.864, NarTop10Accuracy=0.7462, over 7269.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.716, over 5900.95 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:05:15,241 INFO [trainer.py:765] (7/8) Epoch 34, batch 1700, train_loss[loss=3.109, NarTop10Accuracy=0.7041, over 6183.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7175, over 5902.94 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 22:05:41,720 INFO [trainer.py:765] (7/8) Epoch 34, batch 1800, train_loss[loss=3.347, NarTop10Accuracy=0.6595, over 7122.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 5977.33 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 22:06:08,207 INFO [trainer.py:765] (7/8) Epoch 34, batch 1900, train_loss[loss=3.187, NarTop10Accuracy=0.6934, over 5769.00 frames. ], tot_loss[loss=3.064, NarTop10Accuracy=0.7125, over 6016.52 frames. 
], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:33,770 INFO [trainer.py:765] (7/8) Epoch 34, batch 2000, train_loss[loss=3.071, NarTop10Accuracy=0.7131, over 5952.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5982.59 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 22:06:59,126 INFO [trainer.py:765] (7/8) Epoch 34, batch 2100, train_loss[loss=3.101, NarTop10Accuracy=0.6982, over 3879.00 frames. ], tot_loss[loss=3.055, NarTop10Accuracy=0.714, over 5956.93 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 22:07:24,398 INFO [trainer.py:765] (7/8) Epoch 34, batch 2200, train_loss[loss=2.953, NarTop10Accuracy=0.742, over 7182.00 frames. ], tot_loss[loss=3.058, NarTop10Accuracy=0.7132, over 5990.43 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 22:07:49,535 INFO [trainer.py:765] (7/8) Epoch 34, batch 2300, train_loss[loss=2.757, NarTop10Accuracy=0.7797, over 5715.00 frames. ], tot_loss[loss=3.059, NarTop10Accuracy=0.7135, over 6013.13 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 22:08:14,059 INFO [trainer.py:765] (7/8) Epoch 34, batch 2400, train_loss[loss=3.282, NarTop10Accuracy=0.6612, over 5058.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.714, over 5756.24 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:37,648 INFO [trainer.py:765] (7/8) Epoch 34, batch 2500, train_loss[loss=2.757, NarTop10Accuracy=0.7772, over 5106.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7205, over 5461.99 frames. ], batch size: 7, lr: 2.49e-03 +2024-08-06 22:08:57,311 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 22:09:52,640 INFO [trainer.py:765] (7/8) Epoch 35, batch 100, train_loss[loss=2.868, NarTop10Accuracy=0.7509, over 7587.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.717, over 2381.69 frames. ], batch size: 32, lr: 2.45e-03 +2024-08-06 22:10:29,697 INFO [trainer.py:765] (7/8) Epoch 35, batch 200, train_loss[loss=3.144, NarTop10Accuracy=0.6942, over 6921.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.7152, over 3850.19 frames. ], batch size: 17, lr: 2.45e-03 +2024-08-06 22:11:04,942 INFO [trainer.py:765] (7/8) Epoch 35, batch 300, train_loss[loss=2.868, NarTop10Accuracy=0.7586, over 6918.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.719, over 4649.18 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 22:11:35,333 INFO [trainer.py:765] (7/8) Epoch 35, batch 400, train_loss[loss=3.002, NarTop10Accuracy=0.7258, over 5094.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7195, over 5089.80 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 22:11:40,047 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:11:48,129 INFO [trainer.py:811] (7/8) Epoch 35, validation: loss=2.84, NarTop10Accuracy=0.7576, over 1905321.00 frames. +2024-08-06 22:11:48,130 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:11:48,702 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.275e+02 2.426e+02 2.615e+02 4.095e+02, threshold=4.852e+02, percent-clipped=0.0 +2024-08-06 22:12:17,723 INFO [trainer.py:765] (7/8) Epoch 35, batch 500, train_loss[loss=2.69, NarTop10Accuracy=0.7938, over 6156.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7212, over 5390.71 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 22:12:51,425 INFO [trainer.py:765] (7/8) Epoch 35, batch 600, train_loss[loss=3.312, NarTop10Accuracy=0.6562, over 5757.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7176, over 5648.42 frames. 
], batch size: 9, lr: 2.44e-03 +2024-08-06 22:13:24,941 INFO [trainer.py:765] (7/8) Epoch 35, batch 700, train_loss[loss=2.56, NarTop10Accuracy=0.8134, over 4212.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7174, over 5700.28 frames. ], batch size: 5, lr: 2.44e-03 +2024-08-06 22:14:01,384 INFO [trainer.py:765] (7/8) Epoch 35, batch 800, train_loss[loss=2.841, NarTop10Accuracy=0.7628, over 5154.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7162, over 5773.98 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 22:14:34,372 INFO [trainer.py:765] (7/8) Epoch 35, batch 900, train_loss[loss=3.272, NarTop10Accuracy=0.6703, over 6684.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7194, over 5805.15 frames. ], batch size: 14, lr: 2.44e-03 +2024-08-06 22:15:09,372 INFO [trainer.py:765] (7/8) Epoch 35, batch 1000, train_loss[loss=2.826, NarTop10Accuracy=0.7549, over 6183.00 frames. ], tot_loss[loss=3.042, NarTop10Accuracy=0.7165, over 5900.18 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 22:15:48,495 INFO [trainer.py:765] (7/8) Epoch 35, batch 1100, train_loss[loss=3.049, NarTop10Accuracy=0.7174, over 6801.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7162, over 5956.17 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 22:16:22,484 INFO [trainer.py:765] (7/8) Epoch 35, batch 1200, train_loss[loss=2.876, NarTop10Accuracy=0.7459, over 6858.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.719, over 5941.31 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 22:16:57,060 INFO [trainer.py:765] (7/8) Epoch 35, batch 1300, train_loss[loss=2.895, NarTop10Accuracy=0.7432, over 5037.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7202, over 6012.60 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 22:17:31,061 INFO [trainer.py:765] (7/8) Epoch 35, batch 1400, train_loss[loss=3.167, NarTop10Accuracy=0.6973, over 6132.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7175, over 6012.71 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 22:18:03,062 INFO [trainer.py:765] (7/8) Epoch 35, batch 1500, train_loss[loss=3.084, NarTop10Accuracy=0.7109, over 6159.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5950.98 frames. ], batch size: 50, lr: 2.43e-03 +2024-08-06 22:18:30,728 INFO [trainer.py:765] (7/8) Epoch 35, batch 1600, train_loss[loss=2.946, NarTop10Accuracy=0.7388, over 7236.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7158, over 5924.79 frames. ], batch size: 23, lr: 2.43e-03 +2024-08-06 22:18:57,320 INFO [trainer.py:765] (7/8) Epoch 35, batch 1700, train_loss[loss=2.749, NarTop10Accuracy=0.782, over 6339.00 frames. ], tot_loss[loss=3.051, NarTop10Accuracy=0.715, over 5910.95 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 22:19:23,703 INFO [trainer.py:765] (7/8) Epoch 35, batch 1800, train_loss[loss=3.423, NarTop10Accuracy=0.6366, over 6780.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7166, over 5971.66 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 22:19:50,201 INFO [trainer.py:765] (7/8) Epoch 35, batch 1900, train_loss[loss=3.213, NarTop10Accuracy=0.6807, over 6285.00 frames. ], tot_loss[loss=3.054, NarTop10Accuracy=0.7143, over 6018.41 frames. ], batch size: 52, lr: 2.42e-03 +2024-08-06 22:20:15,762 INFO [trainer.py:765] (7/8) Epoch 35, batch 2000, train_loss[loss=3.151, NarTop10Accuracy=0.7039, over 5703.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7174, over 5992.14 frames. 
], batch size: 50, lr: 2.42e-03 +2024-08-06 22:20:41,045 INFO [trainer.py:765] (7/8) Epoch 35, batch 2100, train_loss[loss=2.495, NarTop10Accuracy=0.8225, over 3858.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7171, over 5975.90 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 22:21:06,226 INFO [trainer.py:765] (7/8) Epoch 35, batch 2200, train_loss[loss=2.943, NarTop10Accuracy=0.74, over 7560.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7161, over 6008.86 frames. ], batch size: 32, lr: 2.42e-03 +2024-08-06 22:21:31,286 INFO [trainer.py:765] (7/8) Epoch 35, batch 2300, train_loss[loss=3.001, NarTop10Accuracy=0.7259, over 5784.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7151, over 6022.91 frames. ], batch size: 9, lr: 2.42e-03 +2024-08-06 22:21:55,648 INFO [trainer.py:765] (7/8) Epoch 35, batch 2400, train_loss[loss=3.082, NarTop10Accuracy=0.6999, over 5097.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7174, over 5772.33 frames. ], batch size: 7, lr: 2.42e-03 +2024-08-06 22:21:59,681 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:22:07,656 INFO [trainer.py:811] (7/8) Epoch 35, validation: loss=2.905, NarTop10Accuracy=0.7437, over 1905321.00 frames. +2024-08-06 22:22:07,657 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:22:08,116 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.316e+02 2.462e+02 2.653e+02 5.566e+02, threshold=4.923e+02, percent-clipped=0.1 +2024-08-06 22:22:27,128 INFO [trainer.py:765] (7/8) Epoch 35, batch 2500, train_loss[loss=3.031, NarTop10Accuracy=0.7234, over 5274.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7203, over 5457.30 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 22:22:46,832 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 22:23:47,171 INFO [trainer.py:765] (7/8) Epoch 36, batch 100, train_loss[loss=3.149, NarTop10Accuracy=0.6958, over 7503.00 frames. ], tot_loss[loss=2.99, NarTop10Accuracy=0.7278, over 2367.45 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 22:24:22,494 INFO [trainer.py:765] (7/8) Epoch 36, batch 200, train_loss[loss=2.816, NarTop10Accuracy=0.7594, over 6831.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7219, over 3851.26 frames. ], batch size: 17, lr: 2.38e-03 +2024-08-06 22:24:54,721 INFO [trainer.py:765] (7/8) Epoch 36, batch 300, train_loss[loss=3.256, NarTop10Accuracy=0.675, over 6984.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7199, over 4664.03 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 22:25:29,275 INFO [trainer.py:765] (7/8) Epoch 36, batch 400, train_loss[loss=2.938, NarTop10Accuracy=0.7314, over 5073.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7223, over 5120.57 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 22:26:01,818 INFO [trainer.py:765] (7/8) Epoch 36, batch 500, train_loss[loss=3.425, NarTop10Accuracy=0.6391, over 6180.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7217, over 5386.72 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 22:26:35,025 INFO [trainer.py:765] (7/8) Epoch 36, batch 600, train_loss[loss=2.927, NarTop10Accuracy=0.7352, over 5655.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7225, over 5662.41 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 22:27:10,990 INFO [trainer.py:765] (7/8) Epoch 36, batch 700, train_loss[loss=3.266, NarTop10Accuracy=0.6563, over 5019.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7216, over 5734.71 frames. 
], batch size: 6, lr: 2.37e-03 +2024-08-06 22:27:44,915 INFO [trainer.py:765] (7/8) Epoch 36, batch 800, train_loss[loss=3.251, NarTop10Accuracy=0.6743, over 4998.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.719, over 5803.00 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 22:28:17,812 INFO [trainer.py:765] (7/8) Epoch 36, batch 900, train_loss[loss=2.686, NarTop10Accuracy=0.7824, over 6141.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7216, over 5813.04 frames. ], batch size: 13, lr: 2.37e-03 +2024-08-06 22:28:56,983 INFO [trainer.py:765] (7/8) Epoch 36, batch 1000, train_loss[loss=3.379, NarTop10Accuracy=0.6516, over 6639.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7202, over 5912.65 frames. ], batch size: 14, lr: 2.37e-03 +2024-08-06 22:29:29,364 INFO [trainer.py:765] (7/8) Epoch 36, batch 1100, train_loss[loss=2.937, NarTop10Accuracy=0.7376, over 7011.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7196, over 5935.35 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 22:30:05,680 INFO [trainer.py:765] (7/8) Epoch 36, batch 1200, train_loss[loss=3.135, NarTop10Accuracy=0.6986, over 7029.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7214, over 5949.10 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 22:30:42,576 INFO [trainer.py:765] (7/8) Epoch 36, batch 1300, train_loss[loss=2.805, NarTop10Accuracy=0.7738, over 5139.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7192, over 6011.70 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 22:31:15,938 INFO [trainer.py:765] (7/8) Epoch 36, batch 1400, train_loss[loss=3.066, NarTop10Accuracy=0.7125, over 6057.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7219, over 6026.03 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 22:31:43,748 INFO [trainer.py:765] (7/8) Epoch 36, batch 1500, train_loss[loss=3.37, NarTop10Accuracy=0.6549, over 5781.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7194, over 5954.20 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 22:32:11,459 INFO [trainer.py:765] (7/8) Epoch 36, batch 1600, train_loss[loss=3.461, NarTop10Accuracy=0.6331, over 6867.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7198, over 5939.52 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 22:32:38,108 INFO [trainer.py:765] (7/8) Epoch 36, batch 1700, train_loss[loss=3.334, NarTop10Accuracy=0.6559, over 6633.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.717, over 5914.78 frames. ], batch size: 14, lr: 2.36e-03 +2024-08-06 22:33:04,554 INFO [trainer.py:765] (7/8) Epoch 36, batch 1800, train_loss[loss=3.031, NarTop10Accuracy=0.7101, over 7200.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7179, over 5975.87 frames. ], batch size: 23, lr: 2.36e-03 +2024-08-06 22:33:15,170 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:33:23,567 INFO [trainer.py:811] (7/8) Epoch 36, validation: loss=2.897, NarTop10Accuracy=0.7457, over 1905321.00 frames. +2024-08-06 22:33:23,568 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:33:24,096 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.876e+02 2.309e+02 2.476e+02 2.664e+02 4.811e+02, threshold=4.951e+02, percent-clipped=0.0 +2024-08-06 22:33:39,456 INFO [trainer.py:765] (7/8) Epoch 36, batch 1900, train_loss[loss=2.985, NarTop10Accuracy=0.7314, over 6270.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7182, over 6009.02 frames. 
], batch size: 51, lr: 2.35e-03 +2024-08-06 22:34:05,077 INFO [trainer.py:765] (7/8) Epoch 36, batch 2000, train_loss[loss=3.247, NarTop10Accuracy=0.6822, over 5676.00 frames. ], tot_loss[loss=3.032, NarTop10Accuracy=0.7194, over 5995.75 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 22:34:30,515 INFO [trainer.py:765] (7/8) Epoch 36, batch 2100, train_loss[loss=2.727, NarTop10Accuracy=0.7776, over 4929.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7199, over 5969.94 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 22:34:55,938 INFO [trainer.py:765] (7/8) Epoch 36, batch 2200, train_loss[loss=3.381, NarTop10Accuracy=0.6481, over 7278.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7165, over 6012.86 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 22:35:21,146 INFO [trainer.py:765] (7/8) Epoch 36, batch 2300, train_loss[loss=3.267, NarTop10Accuracy=0.6713, over 5781.00 frames. ], tot_loss[loss=3.062, NarTop10Accuracy=0.7133, over 6012.03 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 22:35:45,600 INFO [trainer.py:765] (7/8) Epoch 36, batch 2400, train_loss[loss=3.132, NarTop10Accuracy=0.697, over 5067.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 5773.03 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:09,182 INFO [trainer.py:765] (7/8) Epoch 36, batch 2500, train_loss[loss=2.914, NarTop10Accuracy=0.7443, over 5055.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7207, over 5465.66 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 22:36:29,006 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 22:37:29,726 INFO [trainer.py:765] (7/8) Epoch 37, batch 100, train_loss[loss=2.839, NarTop10Accuracy=0.7584, over 7320.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7179, over 2389.81 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 22:38:01,273 INFO [trainer.py:765] (7/8) Epoch 37, batch 200, train_loss[loss=2.742, NarTop10Accuracy=0.7831, over 6696.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7218, over 3865.85 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 22:38:35,957 INFO [trainer.py:765] (7/8) Epoch 37, batch 300, train_loss[loss=3.247, NarTop10Accuracy=0.6869, over 7425.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7222, over 4647.48 frames. ], batch size: 23, lr: 2.31e-03 +2024-08-06 22:39:09,308 INFO [trainer.py:765] (7/8) Epoch 37, batch 400, train_loss[loss=2.744, NarTop10Accuracy=0.7722, over 5232.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.724, over 5104.66 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 22:39:43,862 INFO [trainer.py:765] (7/8) Epoch 37, batch 500, train_loss[loss=3.166, NarTop10Accuracy=0.6828, over 6102.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7243, over 5390.34 frames. ], batch size: 11, lr: 2.31e-03 +2024-08-06 22:40:17,334 INFO [trainer.py:765] (7/8) Epoch 37, batch 600, train_loss[loss=2.799, NarTop10Accuracy=0.766, over 5679.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7228, over 5638.64 frames. ], batch size: 9, lr: 2.31e-03 +2024-08-06 22:40:51,617 INFO [trainer.py:765] (7/8) Epoch 37, batch 700, train_loss[loss=3.122, NarTop10Accuracy=0.6967, over 5019.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7185, over 5705.81 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:30,566 INFO [trainer.py:765] (7/8) Epoch 37, batch 800, train_loss[loss=2.804, NarTop10Accuracy=0.7601, over 5067.00 frames. ], tot_loss[loss=3.039, NarTop10Accuracy=0.7176, over 5774.91 frames. 
], batch size: 6, lr: 2.30e-03 +2024-08-06 22:41:59,084 INFO [trainer.py:765] (7/8) Epoch 37, batch 900, train_loss[loss=2.866, NarTop10Accuracy=0.7523, over 6765.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7214, over 5783.03 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:42:38,268 INFO [trainer.py:765] (7/8) Epoch 37, batch 1000, train_loss[loss=3.193, NarTop10Accuracy=0.6877, over 6624.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7181, over 5878.84 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 22:43:15,907 INFO [trainer.py:765] (7/8) Epoch 37, batch 1100, train_loss[loss=2.879, NarTop10Accuracy=0.7451, over 6663.00 frames. ], tot_loss[loss=3.038, NarTop10Accuracy=0.7175, over 5935.60 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 22:43:47,741 INFO [trainer.py:765] (7/8) Epoch 37, batch 1200, train_loss[loss=2.865, NarTop10Accuracy=0.7544, over 7266.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7163, over 5947.89 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 22:44:11,755 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:44:20,075 INFO [trainer.py:811] (7/8) Epoch 37, validation: loss=2.92, NarTop10Accuracy=0.7407, over 1905321.00 frames. +2024-08-06 22:44:20,075 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:44:20,606 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.887e+02 2.309e+02 2.481e+02 2.647e+02 8.766e+02, threshold=4.961e+02, percent-clipped=0.1 +2024-08-06 22:44:32,784 INFO [trainer.py:765] (7/8) Epoch 37, batch 1300, train_loss[loss=2.704, NarTop10Accuracy=0.7823, over 4317.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7202, over 6006.19 frames. ], batch size: 5, lr: 2.30e-03 +2024-08-06 22:45:10,388 INFO [trainer.py:765] (7/8) Epoch 37, batch 1400, train_loss[loss=2.715, NarTop10Accuracy=0.7778, over 6102.00 frames. ], tot_loss[loss=3.026, NarTop10Accuracy=0.7202, over 6012.92 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 22:45:40,512 INFO [trainer.py:765] (7/8) Epoch 37, batch 1500, train_loss[loss=2.855, NarTop10Accuracy=0.7538, over 5712.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5958.39 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 22:46:08,437 INFO [trainer.py:765] (7/8) Epoch 37, batch 1600, train_loss[loss=3.38, NarTop10Accuracy=0.6462, over 7152.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7162, over 5934.97 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:46:35,187 INFO [trainer.py:765] (7/8) Epoch 37, batch 1700, train_loss[loss=3.239, NarTop10Accuracy=0.6796, over 6165.00 frames. ], tot_loss[loss=3.043, NarTop10Accuracy=0.7164, over 5928.30 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 22:47:01,793 INFO [trainer.py:765] (7/8) Epoch 37, batch 1800, train_loss[loss=2.799, NarTop10Accuracy=0.7715, over 7215.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7172, over 5997.18 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 22:47:28,311 INFO [trainer.py:765] (7/8) Epoch 37, batch 1900, train_loss[loss=3.101, NarTop10Accuracy=0.7144, over 6417.00 frames. ], tot_loss[loss=3.04, NarTop10Accuracy=0.7171, over 6035.92 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 22:47:53,925 INFO [trainer.py:765] (7/8) Epoch 37, batch 2000, train_loss[loss=3.22, NarTop10Accuracy=0.6877, over 6159.00 frames. ], tot_loss[loss=3.031, NarTop10Accuracy=0.7189, over 6005.89 frames. 
], batch size: 52, lr: 2.29e-03 +2024-08-06 22:48:19,325 INFO [trainer.py:765] (7/8) Epoch 37, batch 2100, train_loss[loss=2.877, NarTop10Accuracy=0.742, over 4749.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7177, over 5987.33 frames. ], batch size: 5, lr: 2.29e-03 +2024-08-06 22:48:44,707 INFO [trainer.py:765] (7/8) Epoch 37, batch 2200, train_loss[loss=2.886, NarTop10Accuracy=0.7494, over 6933.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.716, over 6033.42 frames. ], batch size: 31, lr: 2.29e-03 +2024-08-06 22:49:09,913 INFO [trainer.py:765] (7/8) Epoch 37, batch 2300, train_loss[loss=2.76, NarTop10Accuracy=0.7754, over 5622.00 frames. ], tot_loss[loss=3.044, NarTop10Accuracy=0.7162, over 6035.09 frames. ], batch size: 9, lr: 2.29e-03 +2024-08-06 22:49:34,319 INFO [trainer.py:765] (7/8) Epoch 37, batch 2400, train_loss[loss=3.081, NarTop10Accuracy=0.6997, over 5145.00 frames. ], tot_loss[loss=3.021, NarTop10Accuracy=0.7205, over 5762.22 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:49:57,860 INFO [trainer.py:765] (7/8) Epoch 37, batch 2500, train_loss[loss=3.082, NarTop10Accuracy=0.6956, over 5112.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7265, over 5471.79 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 22:50:18,049 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 22:51:16,152 INFO [trainer.py:765] (7/8) Epoch 38, batch 100, train_loss[loss=2.988, NarTop10Accuracy=0.7236, over 6948.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7235, over 2384.66 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 22:51:53,015 INFO [trainer.py:765] (7/8) Epoch 38, batch 200, train_loss[loss=3.307, NarTop10Accuracy=0.6631, over 6900.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.721, over 3868.02 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 22:52:25,203 INFO [trainer.py:765] (7/8) Epoch 38, batch 300, train_loss[loss=2.836, NarTop10Accuracy=0.7658, over 7077.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7181, over 4664.84 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 22:52:55,627 INFO [trainer.py:765] (7/8) Epoch 38, batch 400, train_loss[loss=3.19, NarTop10Accuracy=0.685, over 5133.00 frames. ], tot_loss[loss=3.025, NarTop10Accuracy=0.72, over 5121.69 frames. ], batch size: 7, lr: 2.25e-03 +2024-08-06 22:53:32,230 INFO [trainer.py:765] (7/8) Epoch 38, batch 500, train_loss[loss=2.81, NarTop10Accuracy=0.7595, over 6459.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7266, over 5408.90 frames. ], batch size: 12, lr: 2.25e-03 +2024-08-06 22:54:05,498 INFO [trainer.py:765] (7/8) Epoch 38, batch 600, train_loss[loss=3.218, NarTop10Accuracy=0.6838, over 5757.00 frames. ], tot_loss[loss=3.008, NarTop10Accuracy=0.7237, over 5658.72 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 22:54:36,003 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 22:54:43,918 INFO [trainer.py:811] (7/8) Epoch 38, validation: loss=2.939, NarTop10Accuracy=0.7369, over 1905321.00 frames. +2024-08-06 22:54:43,919 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 22:54:44,427 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.313e+02 2.478e+02 2.663e+02 7.254e+02, threshold=4.957e+02, percent-clipped=0.3 +2024-08-06 22:54:46,659 INFO [trainer.py:765] (7/8) Epoch 38, batch 700, train_loss[loss=2.839, NarTop10Accuracy=0.7575, over 5094.00 frames. ], tot_loss[loss=3.007, NarTop10Accuracy=0.7239, over 5721.18 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:24,938 INFO [trainer.py:765] (7/8) Epoch 38, batch 800, train_loss[loss=3.01, NarTop10Accuracy=0.7223, over 4965.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.721, over 5782.18 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:55:59,703 INFO [trainer.py:765] (7/8) Epoch 38, batch 900, train_loss[loss=2.843, NarTop10Accuracy=0.7548, over 6156.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7221, over 5804.28 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:56:32,091 INFO [trainer.py:765] (7/8) Epoch 38, batch 1000, train_loss[loss=3.383, NarTop10Accuracy=0.6531, over 6282.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7214, over 5895.93 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 22:57:08,991 INFO [trainer.py:765] (7/8) Epoch 38, batch 1100, train_loss[loss=2.991, NarTop10Accuracy=0.7278, over 6828.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7186, over 5945.72 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 22:57:42,662 INFO [trainer.py:765] (7/8) Epoch 38, batch 1200, train_loss[loss=2.878, NarTop10Accuracy=0.759, over 7143.00 frames. ], tot_loss[loss=3.036, NarTop10Accuracy=0.7178, over 5934.53 frames. ], batch size: 31, lr: 2.24e-03 +2024-08-06 22:58:16,546 INFO [trainer.py:765] (7/8) Epoch 38, batch 1300, train_loss[loss=3.248, NarTop10Accuracy=0.6738, over 5010.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7184, over 5999.60 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 22:58:49,811 INFO [trainer.py:765] (7/8) Epoch 38, batch 1400, train_loss[loss=2.839, NarTop10Accuracy=0.7623, over 6126.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7147, over 6018.39 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 22:59:22,854 INFO [trainer.py:765] (7/8) Epoch 38, batch 1500, train_loss[loss=3.508, NarTop10Accuracy=0.6178, over 6147.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7185, over 5966.37 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 22:59:50,644 INFO [trainer.py:765] (7/8) Epoch 38, batch 1600, train_loss[loss=3.354, NarTop10Accuracy=0.6505, over 7293.00 frames. ], tot_loss[loss=3.037, NarTop10Accuracy=0.7178, over 5939.32 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 23:00:17,316 INFO [trainer.py:765] (7/8) Epoch 38, batch 1700, train_loss[loss=3.062, NarTop10Accuracy=0.7225, over 6114.00 frames. ], tot_loss[loss=3.057, NarTop10Accuracy=0.7137, over 5915.81 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 23:00:43,764 INFO [trainer.py:765] (7/8) Epoch 38, batch 1800, train_loss[loss=3.285, NarTop10Accuracy=0.667, over 7305.00 frames. ], tot_loss[loss=3.049, NarTop10Accuracy=0.7155, over 5983.55 frames. ], batch size: 23, lr: 2.23e-03 +2024-08-06 23:01:10,192 INFO [trainer.py:765] (7/8) Epoch 38, batch 1900, train_loss[loss=3.352, NarTop10Accuracy=0.6619, over 6102.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7157, over 6030.61 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:01:35,681 INFO [trainer.py:765] (7/8) Epoch 38, batch 2000, train_loss[loss=3.366, NarTop10Accuracy=0.6517, over 6282.00 frames. ], tot_loss[loss=3.05, NarTop10Accuracy=0.7155, over 6000.76 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 23:02:01,051 INFO [trainer.py:765] (7/8) Epoch 38, batch 2100, train_loss[loss=2.755, NarTop10Accuracy=0.7629, over 3882.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.716, over 5962.01 frames. 
], batch size: 4, lr: 2.23e-03 +2024-08-06 23:02:26,315 INFO [trainer.py:765] (7/8) Epoch 38, batch 2200, train_loss[loss=2.906, NarTop10Accuracy=0.7393, over 7251.00 frames. ], tot_loss[loss=3.046, NarTop10Accuracy=0.7157, over 6007.00 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 23:02:51,420 INFO [trainer.py:765] (7/8) Epoch 38, batch 2300, train_loss[loss=2.697, NarTop10Accuracy=0.7813, over 5580.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7154, over 6030.91 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 23:03:16,348 INFO [trainer.py:765] (7/8) Epoch 38, batch 2400, train_loss[loss=2.686, NarTop10Accuracy=0.7845, over 5121.00 frames. ], tot_loss[loss=3.035, NarTop10Accuracy=0.7178, over 5783.36 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:39,824 INFO [trainer.py:765] (7/8) Epoch 38, batch 2500, train_loss[loss=3.468, NarTop10Accuracy=0.635, over 4953.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7219, over 5474.68 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 23:03:59,638 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 23:04:58,940 INFO [trainer.py:765] (7/8) Epoch 39, batch 100, train_loss[loss=3.36, NarTop10Accuracy=0.6515, over 6975.00 frames. ], tot_loss[loss=2.986, NarTop10Accuracy=0.7286, over 2367.07 frames. ], batch size: 31, lr: 2.19e-03 +2024-08-06 23:05:03,469 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 23:05:11,563 INFO [trainer.py:811] (7/8) Epoch 39, validation: loss=2.9, NarTop10Accuracy=0.7445, over 1905321.00 frames. +2024-08-06 23:05:11,564 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 23:05:12,137 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.316e+02 2.500e+02 2.688e+02 4.683e+02, threshold=5.001e+02, percent-clipped=0.0 +2024-08-06 23:05:40,163 INFO [trainer.py:765] (7/8) Epoch 39, batch 200, train_loss[loss=2.753, NarTop10Accuracy=0.7734, over 6852.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7266, over 3849.25 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 23:06:17,293 INFO [trainer.py:765] (7/8) Epoch 39, batch 300, train_loss[loss=3.091, NarTop10Accuracy=0.7142, over 7071.00 frames. ], tot_loss[loss=2.987, NarTop10Accuracy=0.7275, over 4661.97 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 23:06:48,276 INFO [trainer.py:765] (7/8) Epoch 39, batch 400, train_loss[loss=2.945, NarTop10Accuracy=0.7364, over 5115.00 frames. ], tot_loss[loss=2.985, NarTop10Accuracy=0.7284, over 5107.23 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 23:07:19,175 INFO [trainer.py:765] (7/8) Epoch 39, batch 500, train_loss[loss=3.41, NarTop10Accuracy=0.6436, over 6078.00 frames. ], tot_loss[loss=2.997, NarTop10Accuracy=0.7257, over 5389.84 frames. ], batch size: 11, lr: 2.19e-03 +2024-08-06 23:07:52,563 INFO [trainer.py:765] (7/8) Epoch 39, batch 600, train_loss[loss=2.665, NarTop10Accuracy=0.7903, over 5712.00 frames. ], tot_loss[loss=3.014, NarTop10Accuracy=0.7223, over 5660.59 frames. ], batch size: 9, lr: 2.19e-03 +2024-08-06 23:08:33,694 INFO [trainer.py:765] (7/8) Epoch 39, batch 700, train_loss[loss=3.117, NarTop10Accuracy=0.6976, over 4998.00 frames. ], tot_loss[loss=3.024, NarTop10Accuracy=0.7201, over 5723.31 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:05,861 INFO [trainer.py:765] (7/8) Epoch 39, batch 800, train_loss[loss=2.749, NarTop10Accuracy=0.7803, over 5109.00 frames. ], tot_loss[loss=3.027, NarTop10Accuracy=0.7197, over 5779.06 frames. 
], batch size: 6, lr: 2.18e-03 +2024-08-06 23:09:38,865 INFO [trainer.py:765] (7/8) Epoch 39, batch 900, train_loss[loss=3.294, NarTop10Accuracy=0.6637, over 6153.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 5792.45 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:18,460 INFO [trainer.py:765] (7/8) Epoch 39, batch 1000, train_loss[loss=2.662, NarTop10Accuracy=0.789, over 6468.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7229, over 5891.27 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 23:10:53,934 INFO [trainer.py:765] (7/8) Epoch 39, batch 1100, train_loss[loss=2.816, NarTop10Accuracy=0.7733, over 6864.00 frames. ], tot_loss[loss=3.03, NarTop10Accuracy=0.7189, over 5922.98 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 23:11:27,822 INFO [trainer.py:765] (7/8) Epoch 39, batch 1200, train_loss[loss=2.995, NarTop10Accuracy=0.7292, over 7272.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.721, over 5931.90 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 23:12:07,252 INFO [trainer.py:765] (7/8) Epoch 39, batch 1300, train_loss[loss=2.843, NarTop10Accuracy=0.7544, over 5112.00 frames. ], tot_loss[loss=3.016, NarTop10Accuracy=0.7221, over 5983.52 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 23:12:39,301 INFO [trainer.py:765] (7/8) Epoch 39, batch 1400, train_loss[loss=2.999, NarTop10Accuracy=0.7288, over 6174.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7211, over 5994.12 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 23:13:09,756 INFO [trainer.py:765] (7/8) Epoch 39, batch 1500, train_loss[loss=3.561, NarTop10Accuracy=0.6102, over 5919.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7208, over 5938.23 frames. ], batch size: 50, lr: 2.18e-03 +2024-08-06 23:13:37,586 INFO [trainer.py:765] (7/8) Epoch 39, batch 1600, train_loss[loss=2.936, NarTop10Accuracy=0.7404, over 7218.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.723, over 5919.08 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:04,220 INFO [trainer.py:765] (7/8) Epoch 39, batch 1700, train_loss[loss=3.362, NarTop10Accuracy=0.6567, over 6378.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7182, over 5911.58 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 23:14:30,767 INFO [trainer.py:765] (7/8) Epoch 39, batch 1800, train_loss[loss=2.87, NarTop10Accuracy=0.7486, over 7119.00 frames. ], tot_loss[loss=3.034, NarTop10Accuracy=0.7185, over 5970.59 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 23:14:57,180 INFO [trainer.py:765] (7/8) Epoch 39, batch 1900, train_loss[loss=3.009, NarTop10Accuracy=0.7269, over 6321.00 frames. ], tot_loss[loss=3.047, NarTop10Accuracy=0.7159, over 6005.80 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:22,751 INFO [trainer.py:765] (7/8) Epoch 39, batch 2000, train_loss[loss=3.242, NarTop10Accuracy=0.6812, over 6486.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7198, over 5992.29 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 23:15:48,060 INFO [trainer.py:765] (7/8) Epoch 39, batch 2100, train_loss[loss=3.462, NarTop10Accuracy=0.6281, over 3855.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7201, over 5976.87 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 23:15:51,871 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 23:16:02,156 INFO [trainer.py:811] (7/8) Epoch 39, validation: loss=2.85, NarTop10Accuracy=0.7552, over 1905321.00 frames. 
+2024-08-06 23:16:02,156 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 23:16:02,645 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.369e+02 2.530e+02 2.720e+02 6.127e+02, threshold=5.059e+02, percent-clipped=0.2 +2024-08-06 23:16:23,652 INFO [trainer.py:765] (7/8) Epoch 39, batch 2200, train_loss[loss=3.151, NarTop10Accuracy=0.697, over 7257.00 frames. ], tot_loss[loss=3.033, NarTop10Accuracy=0.7191, over 6020.25 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 23:16:48,847 INFO [trainer.py:765] (7/8) Epoch 39, batch 2300, train_loss[loss=2.67, NarTop10Accuracy=0.7907, over 5640.00 frames. ], tot_loss[loss=3.045, NarTop10Accuracy=0.7167, over 6016.07 frames. ], batch size: 9, lr: 2.17e-03 +2024-08-06 23:17:13,136 INFO [trainer.py:765] (7/8) Epoch 39, batch 2400, train_loss[loss=2.668, NarTop10Accuracy=0.7888, over 5136.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.722, over 5763.72 frames. ], batch size: 7, lr: 2.17e-03 +2024-08-06 23:17:36,712 INFO [trainer.py:765] (7/8) Epoch 39, batch 2500, train_loss[loss=2.977, NarTop10Accuracy=0.7315, over 5166.00 frames. ], tot_loss[loss=2.993, NarTop10Accuracy=0.7265, over 5480.97 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 23:17:56,532 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 23:18:48,946 INFO [trainer.py:765] (7/8) Epoch 40, batch 100, train_loss[loss=3.077, NarTop10Accuracy=0.7076, over 7029.00 frames. ], tot_loss[loss=3, NarTop10Accuracy=0.725, over 2366.84 frames. ], batch size: 31, lr: 2.14e-03 +2024-08-06 23:19:23,035 INFO [trainer.py:765] (7/8) Epoch 40, batch 200, train_loss[loss=2.797, NarTop10Accuracy=0.7729, over 6708.00 frames. ], tot_loss[loss=2.991, NarTop10Accuracy=0.727, over 3850.68 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 23:19:57,187 INFO [trainer.py:765] (7/8) Epoch 40, batch 300, train_loss[loss=2.82, NarTop10Accuracy=0.7588, over 7053.00 frames. ], tot_loss[loss=3.012, NarTop10Accuracy=0.7228, over 4675.47 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 23:20:30,182 INFO [trainer.py:765] (7/8) Epoch 40, batch 400, train_loss[loss=2.785, NarTop10Accuracy=0.7711, over 5256.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 5106.46 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 23:21:00,251 INFO [trainer.py:765] (7/8) Epoch 40, batch 500, train_loss[loss=2.718, NarTop10Accuracy=0.7856, over 6048.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7216, over 5386.26 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 23:21:34,882 INFO [trainer.py:765] (7/8) Epoch 40, batch 600, train_loss[loss=2.978, NarTop10Accuracy=0.7366, over 6198.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.724, over 5653.83 frames. ], batch size: 10, lr: 2.13e-03 +2024-08-06 23:22:11,097 INFO [trainer.py:765] (7/8) Epoch 40, batch 700, train_loss[loss=2.995, NarTop10Accuracy=0.7195, over 5208.00 frames. ], tot_loss[loss=3.006, NarTop10Accuracy=0.724, over 5724.72 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:22:44,754 INFO [trainer.py:765] (7/8) Epoch 40, batch 800, train_loss[loss=2.542, NarTop10Accuracy=0.8151, over 5022.00 frames. ], tot_loss[loss=3.02, NarTop10Accuracy=0.7213, over 5773.15 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 23:23:16,635 INFO [trainer.py:765] (7/8) Epoch 40, batch 900, train_loss[loss=3.353, NarTop10Accuracy=0.6624, over 6654.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7213, over 5810.33 frames. 
], batch size: 14, lr: 2.13e-03 +2024-08-06 23:23:55,591 INFO [trainer.py:765] (7/8) Epoch 40, batch 1000, train_loss[loss=3.425, NarTop10Accuracy=0.6405, over 6348.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 5902.54 frames. ], batch size: 13, lr: 2.13e-03 +2024-08-06 23:24:30,208 INFO [trainer.py:765] (7/8) Epoch 40, batch 1100, train_loss[loss=2.755, NarTop10Accuracy=0.7822, over 6771.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7214, over 5922.12 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 23:25:03,090 INFO [trainer.py:765] (7/8) Epoch 40, batch 1200, train_loss[loss=2.926, NarTop10Accuracy=0.7365, over 7185.00 frames. ], tot_loss[loss=3.017, NarTop10Accuracy=0.7217, over 5928.68 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 23:25:41,843 INFO [trainer.py:765] (7/8) Epoch 40, batch 1300, train_loss[loss=2.847, NarTop10Accuracy=0.7576, over 5109.00 frames. ], tot_loss[loss=3.01, NarTop10Accuracy=0.7229, over 5993.12 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 23:26:13,385 INFO [trainer.py:765] (7/8) Epoch 40, batch 1400, train_loss[loss=2.792, NarTop10Accuracy=0.7766, over 6150.00 frames. ], tot_loss[loss=3.028, NarTop10Accuracy=0.7198, over 6009.27 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 23:26:43,377 INFO [trainer.py:765] (7/8) Epoch 40, batch 1500, train_loss[loss=3.246, NarTop10Accuracy=0.6787, over 6390.00 frames. ], tot_loss[loss=3.018, NarTop10Accuracy=0.7219, over 5949.10 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:26:54,419 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 23:27:02,676 INFO [trainer.py:811] (7/8) Epoch 40, validation: loss=2.86, NarTop10Accuracy=0.7522, over 1905321.00 frames. +2024-08-06 23:27:02,677 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 23:27:03,156 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.329e+02 2.511e+02 2.723e+02 1.241e+03, threshold=5.022e+02, percent-clipped=0.2 +2024-08-06 23:27:19,382 INFO [trainer.py:765] (7/8) Epoch 40, batch 1600, train_loss[loss=2.854, NarTop10Accuracy=0.7588, over 7257.00 frames. ], tot_loss[loss=3.023, NarTop10Accuracy=0.7208, over 5919.94 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:27:46,057 INFO [trainer.py:765] (7/8) Epoch 40, batch 1700, train_loss[loss=3.284, NarTop10Accuracy=0.6674, over 6381.00 frames. ], tot_loss[loss=3.019, NarTop10Accuracy=0.7212, over 5925.93 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 23:28:12,579 INFO [trainer.py:765] (7/8) Epoch 40, batch 1800, train_loss[loss=3.115, NarTop10Accuracy=0.7067, over 6885.00 frames. ], tot_loss[loss=3.002, NarTop10Accuracy=0.7249, over 5973.26 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 23:28:38,909 INFO [trainer.py:765] (7/8) Epoch 40, batch 1900, train_loss[loss=3.226, NarTop10Accuracy=0.6811, over 6432.00 frames. ], tot_loss[loss=3.009, NarTop10Accuracy=0.7238, over 6030.56 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:04,445 INFO [trainer.py:765] (7/8) Epoch 40, batch 2000, train_loss[loss=3.523, NarTop10Accuracy=0.6298, over 5991.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7228, over 6013.52 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 23:29:29,750 INFO [trainer.py:765] (7/8) Epoch 40, batch 2100, train_loss[loss=2.665, NarTop10Accuracy=0.7929, over 3759.00 frames. ], tot_loss[loss=3.013, NarTop10Accuracy=0.7225, over 5987.82 frames. 
], batch size: 4, lr: 2.11e-03 +2024-08-06 23:29:54,939 INFO [trainer.py:765] (7/8) Epoch 40, batch 2200, train_loss[loss=3.238, NarTop10Accuracy=0.6765, over 7359.00 frames. ], tot_loss[loss=3.022, NarTop10Accuracy=0.7206, over 6013.85 frames. ], batch size: 31, lr: 2.11e-03 +2024-08-06 23:30:20,013 INFO [trainer.py:765] (7/8) Epoch 40, batch 2300, train_loss[loss=2.994, NarTop10Accuracy=0.7246, over 5787.00 frames. ], tot_loss[loss=3.029, NarTop10Accuracy=0.7195, over 6029.60 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 23:30:44,296 INFO [trainer.py:765] (7/8) Epoch 40, batch 2400, train_loss[loss=2.757, NarTop10Accuracy=0.7724, over 5016.00 frames. ], tot_loss[loss=3.015, NarTop10Accuracy=0.7222, over 5785.52 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:07,738 INFO [trainer.py:765] (7/8) Epoch 40, batch 2500, train_loss[loss=2.988, NarTop10Accuracy=0.7267, over 5175.00 frames. ], tot_loss[loss=2.984, NarTop10Accuracy=0.7285, over 5481.37 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 23:31:27,718 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 23:31:27,721 INFO [trainer.py:1069] (7/8) Done! diff --git a/libritts-r/tensorboard_stage1/events.out.tfevents.1722931336.6867463.3160.0 b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931336.6867463.3160.0 new file mode 100644 index 0000000000000000000000000000000000000000..fbe9aa85263f1ec0eb47b1cfc464f03916a28dab --- /dev/null +++ b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931336.6867463.3160.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e4fdcf08995c4e16faac48f01f61e78a24c67e8d0f4ca11829b8f07000ebe0c +size 135 diff --git a/libritts-r/tensorboard_stage1/events.out.tfevents.1722931437.6867463.17896.0 b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931437.6867463.17896.0 new file mode 100644 index 0000000000000000000000000000000000000000..be791ea3e8175130c75efa0cc84e928e744140fe --- /dev/null +++ b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931437.6867463.17896.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c65dd1cf55da46628acbeaccf4d3fdba589d779fb4908ec4adf688777d119534 +size 88 diff --git a/libritts-r/tensorboard_stage1/events.out.tfevents.1722931574.6867463.20306.0 b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931574.6867463.20306.0 new file mode 100644 index 0000000000000000000000000000000000000000..d38c054651d4eb1ae826e441a335c8886463740f --- /dev/null +++ b/libritts-r/tensorboard_stage1/events.out.tfevents.1722931574.6867463.20306.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e402ea5b9f65580ee8f730e153fdfe780d5b67538a8246fc3f618309f88ca1d +size 103227 diff --git a/libritts-r/tensorboard_stage2/events.out.tfevents.1722954221.6867463.1063288.0 b/libritts-r/tensorboard_stage2/events.out.tfevents.1722954221.6867463.1063288.0 new file mode 100644 index 0000000000000000000000000000000000000000..e7863b6a4a5d7886ef45d49f6317fe913a318f69 --- /dev/null +++ b/libritts-r/tensorboard_stage2/events.out.tfevents.1722954221.6867463.1063288.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0a6ca0899fbb9fecb7e2a6a080cf907cb090980428971e0d996640a6d8a8e93 +size 434306 diff --git a/libritts/log/log-train-2024-08-06-03-01-46-0 b/libritts/log/log-train-2024-08-06-03-01-46-0 new file mode 100644 index 0000000000000000000000000000000000000000..106a587aeeb183dcefcd2dacdbe3ea373e6a874e --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-0 @@ -0,0 +1,15 @@ 
+2024-08-06 03:01:46,136 INFO [trainer.py:870] (0/8) Training started +2024-08-06 03:01:46,140 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 03:01:46,141 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,141 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 03:01:47,114 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 03:01:47,976 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 03:01:50,133 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 03:01:50,151 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,769 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 03:01:50,769 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 03:01:51,096 INFO [datamodule.py:388] (0/8) About to create dev dataloader +2024-08-06 03:01:51,096 INFO [trainer.py:1104] (0/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. 
diff --git a/libritts/log/log-train-2024-08-06-03-01-46-1 b/libritts/log/log-train-2024-08-06-03-01-46-1 new file mode 100644 index 0000000000000000000000000000000000000000..9f5c18feab6c7eed85fe1628dd2d3ccc35024ddf --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-1 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,127 INFO [trainer.py:870] (1/8) Training started +2024-08-06 03:01:46,128 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 03:01:46,128 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,128 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 03:01:47,130 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 03:01:48,044 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 03:01:50,132 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 03:01:50,151 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,773 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 03:01:50,773 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 03:01:51,101 INFO [datamodule.py:388] (1/8) About to create dev 
dataloader +2024-08-06 03:01:51,101 INFO [trainer.py:1104] (1/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-2 b/libritts/log/log-train-2024-08-06-03-01-46-2 new file mode 100644 index 0000000000000000000000000000000000000000..54de0fab79ae8f4736154cd547c865e456b18542 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-2 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,149 INFO [trainer.py:870] (2/8) Training started +2024-08-06 03:01:46,150 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 03:01:46,150 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,150 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 03:01:46,898 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 03:01:47,589 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 03:01:50,133 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 03:01:50,151 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,778 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 03:01:50,778 INFO 
[datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 03:01:51,108 INFO [datamodule.py:388] (2/8) About to create dev dataloader +2024-08-06 03:01:51,108 INFO [trainer.py:1104] (2/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-3 b/libritts/log/log-train-2024-08-06-03-01-46-3 new file mode 100644 index 0000000000000000000000000000000000000000..4559413a3aa1a96cc8fd2a8ca56f0e85ad4a4678 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-3 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,149 INFO [trainer.py:870] (3/8) Training started +2024-08-06 03:01:46,150 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 03:01:46,151 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,151 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 03:01:46,887 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 03:01:47,590 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 03:01:50,133 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 03:01:50,151 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 03:01:50,153 INFO [datamodule.py:323] (3/8) Using 
DynamicBucketingSampler +2024-08-06 03:01:50,772 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 03:01:50,773 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 03:01:51,100 INFO [datamodule.py:388] (3/8) About to create dev dataloader +2024-08-06 03:01:51,100 INFO [trainer.py:1104] (3/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-4 b/libritts/log/log-train-2024-08-06-03-01-46-4 new file mode 100644 index 0000000000000000000000000000000000000000..41e782ca08f887a901ddb35e30cdb6b3b6d75f97 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-4 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,133 INFO [trainer.py:870] (4/8) Training started +2024-08-06 03:01:46,133 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 03:01:46,134 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,134 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 03:01:47,116 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 03:01:47,977 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 03:01:50,133 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 
03:01:50,151 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,768 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 03:01:50,768 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 03:01:51,099 INFO [datamodule.py:388] (4/8) About to create dev dataloader +2024-08-06 03:01:51,099 INFO [trainer.py:1104] (4/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-5 b/libritts/log/log-train-2024-08-06-03-01-46-5 new file mode 100644 index 0000000000000000000000000000000000000000..f2fc49d785f5af3dad694e17da24b4ad9b743db0 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-5 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,128 INFO [trainer.py:870] (5/8) Training started +2024-08-06 03:01:46,129 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 03:01:46,129 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,129 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 03:01:47,129 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 03:01:48,055 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 03:01:50,131 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 03:01:50,144 INFO 
[datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 03:01:50,150 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 03:01:50,150 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,777 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 03:01:50,777 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 03:01:51,111 INFO [datamodule.py:388] (5/8) About to create dev dataloader +2024-08-06 03:01:51,111 INFO [trainer.py:1104] (5/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-6 b/libritts/log/log-train-2024-08-06-03-01-46-6 new file mode 100644 index 0000000000000000000000000000000000000000..3dc887c2882e40554bc809a3a8aee88a847c81a4 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-6 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,128 INFO [trainer.py:870] (6/8) Training started +2024-08-06 03:01:46,129 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 03:01:46,129 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,129 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 03:01:46,923 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 03:01:47,594 INFO 
[trainer.py:914] (6/8) Using DDP +2024-08-06 03:01:50,130 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 03:01:50,151 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 03:01:50,151 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,772 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 03:01:50,772 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 03:01:51,100 INFO [datamodule.py:388] (6/8) About to create dev dataloader +2024-08-06 03:01:51,100 INFO [trainer.py:1104] (6/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-01-46-7 b/libritts/log/log-train-2024-08-06-03-01-46-7 new file mode 100644 index 0000000000000000000000000000000000000000..328b7444ff1ef543d3349da6347df06a05b7c14a --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-01-46-7 @@ -0,0 +1,15 @@ +2024-08-06 03:01:46,127 INFO [trainer.py:870] (7/8) Training started +2024-08-06 03:01:46,128 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 03:01:46,128 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': True, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:01:46,128 INFO [trainer.py:892] (7/8) About to 
create model +2024-08-06 03:01:47,129 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 03:01:48,055 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 03:01:50,131 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 03:01:50,144 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 03:01:50,150 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 03:01:50,150 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 03:01:50,152 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 03:01:50,766 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 03:01:50,766 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 03:01:51,090 INFO [datamodule.py:388] (7/8) About to create dev dataloader +2024-08-06 03:01:51,091 INFO [trainer.py:1104] (7/8) Sanity check -- see if any of the batches in epoch 1 would cause OOM. diff --git a/libritts/log/log-train-2024-08-06-03-26-50-0 b/libritts/log/log-train-2024-08-06-03-26-50-0 new file mode 100644 index 0000000000000000000000000000000000000000..dffeffe42c0568498e41dd323acaf42b214a6dff --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-0 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,289 INFO [trainer.py:870] (0/8) Training started +2024-08-06 03:26:50,293 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 03:26:50,293 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 
'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,293 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 03:26:51,081 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,961 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 03:26:54,035 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 03:26:54,036 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 03:26:54,037 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 03:26:54,037 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 03:26:54,038 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,644 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 03:26:54,644 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 03:26:54,968 INFO [datamodule.py:388] (0/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-1 b/libritts/log/log-train-2024-08-06-03-26-50-1 new file mode 100644 index 0000000000000000000000000000000000000000..73088ec1acf72f687bccacb541fd7c05c8df650c --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-1 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,319 INFO [trainer.py:870] (1/8) Training started +2024-08-06 03:26:50,320 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 03:26:50,320 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 
'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,320 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 03:26:51,037 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,836 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 03:26:54,036 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 03:26:54,038 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 03:26:54,039 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 03:26:54,039 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 03:26:54,040 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,657 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 03:26:54,657 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 03:26:54,990 INFO [datamodule.py:388] (1/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-2 b/libritts/log/log-train-2024-08-06-03-26-50-2 new file mode 100644 index 0000000000000000000000000000000000000000..2a09c434f0e98915ee536ad3596fa8bb5e68d079 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-2 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,323 INFO [trainer.py:870] (2/8) Training started +2024-08-06 03:26:50,324 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 03:26:50,324 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 
'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,324 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 03:26:51,090 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,890 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 03:26:54,038 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 03:26:54,040 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 03:26:54,041 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 03:26:54,041 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 03:26:54,041 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,668 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 03:26:54,669 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 03:26:55,004 INFO [datamodule.py:388] (2/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-3 b/libritts/log/log-train-2024-08-06-03-26-50-3 new file mode 100644 index 0000000000000000000000000000000000000000..4992d711f603927f7d60e1d6d654caa90c14d38a --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-3 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,316 INFO [trainer.py:870] (3/8) Training started +2024-08-06 03:26:50,317 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 03:26:50,317 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 
'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,317 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 03:26:51,135 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,959 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 03:26:54,035 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 03:26:54,036 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 03:26:54,038 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 03:26:54,038 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 03:26:54,038 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,678 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 03:26:54,679 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 03:26:55,067 INFO [datamodule.py:388] (3/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-4 b/libritts/log/log-train-2024-08-06-03-26-50-4 new file mode 100644 index 0000000000000000000000000000000000000000..64050f169e7936337955bd1c9e85e220e260f3d5 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-4 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,273 INFO [trainer.py:870] (4/8) Training started +2024-08-06 03:26:50,274 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 03:26:50,274 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 
'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,274 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 03:26:51,035 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,876 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 03:26:54,038 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 03:26:54,039 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 03:26:54,041 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 03:26:54,041 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 03:26:54,041 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,651 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 03:26:54,651 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 03:26:54,977 INFO [datamodule.py:388] (4/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-5 b/libritts/log/log-train-2024-08-06-03-26-50-5 new file mode 100644 index 0000000000000000000000000000000000000000..21c4a6d76dc2c71b09bbddb270453eca8c563668 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-5 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,320 INFO [trainer.py:870] (5/8) Training started +2024-08-06 03:26:50,321 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 03:26:50,321 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 
'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,321 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 03:26:51,111 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,974 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 03:26:54,033 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 03:26:54,036 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 03:26:54,037 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 03:26:54,037 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 03:26:54,038 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,648 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 03:26:54,649 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 03:26:54,978 INFO [datamodule.py:388] (5/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-6 b/libritts/log/log-train-2024-08-06-03-26-50-6 new file mode 100644 index 0000000000000000000000000000000000000000..a798f380f21dbe5abeaf3f7b670e9108be2ba40b --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-6 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,325 INFO [trainer.py:870] (6/8) Training started +2024-08-06 03:26:50,326 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 03:26:50,327 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 
'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,327 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 03:26:51,093 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,830 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 03:26:54,038 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 03:26:54,040 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 03:26:54,041 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 03:26:54,041 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 03:26:54,042 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,650 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 03:26:54,650 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 03:26:54,977 INFO [datamodule.py:388] (6/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-26-50-7 b/libritts/log/log-train-2024-08-06-03-26-50-7 new file mode 100644 index 0000000000000000000000000000000000000000..ab5fb47a7467817da1c5234ace6fc575426c25dd --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-26-50-7 @@ -0,0 +1,14 @@ +2024-08-06 03:26:50,272 INFO [trainer.py:870] (7/8) Training started +2024-08-06 03:26:50,273 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 03:26:50,273 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': 
False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:26:50,273 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 03:26:51,030 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 03:26:51,813 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 03:26:54,038 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 03:26:54,040 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 03:26:54,041 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 03:26:54,041 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 03:26:54,042 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 03:26:54,663 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 03:26:54,663 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 03:26:54,996 INFO [datamodule.py:388] (7/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-0 b/libritts/log/log-train-2024-08-06-03-36-20-0 new file mode 100644 index 0000000000000000000000000000000000000000..cfe4e20ba330cbf4e5a2b8c659d6c98d81b15e92 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-0 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,618 INFO [trainer.py:870] (0/8) Training started +2024-08-06 03:36:20,622 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 03:36:20,622 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 
'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,623 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 03:36:21,369 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,184 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 03:36:24,258 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 03:36:24,259 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 03:36:24,260 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 03:36:24,262 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,874 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 03:36:24,875 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 03:36:25,205 INFO [datamodule.py:388] (0/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-1 b/libritts/log/log-train-2024-08-06-03-36-20-1 new file mode 100644 index 0000000000000000000000000000000000000000..29346d68c9d357ee3b94d083b85805ce598839e1 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-1 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,601 INFO [trainer.py:870] (1/8) Training started +2024-08-06 03:36:20,602 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 03:36:20,602 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 
'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,602 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 03:36:21,356 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,156 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 03:36:24,258 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 03:36:24,260 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 03:36:24,262 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,876 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 03:36:24,876 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 03:36:25,210 INFO [datamodule.py:388] (1/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-2 b/libritts/log/log-train-2024-08-06-03-36-20-2 new file mode 100644 index 0000000000000000000000000000000000000000..c63668ea3a47d191c5b6a063d5469b1621c5a5e2 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-2 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,573 INFO [trainer.py:870] (2/8) Training started +2024-08-06 03:36:20,574 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 03:36:20,574 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 
'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,574 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 03:36:21,364 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,194 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 03:36:24,258 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 03:36:24,261 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 03:36:24,261 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 03:36:24,262 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,875 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 03:36:24,876 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 03:36:25,205 INFO [datamodule.py:388] (2/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-3 b/libritts/log/log-train-2024-08-06-03-36-20-3 new file mode 100644 index 0000000000000000000000000000000000000000..1b701d6008cae8b872c74d46b6a6243fd12e1173 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-3 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,626 INFO [trainer.py:870] (3/8) Training started +2024-08-06 03:36:20,627 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 03:36:20,627 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': 
False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,627 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 03:36:21,405 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,199 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 03:36:24,258 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 03:36:24,259 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 03:36:24,260 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 03:36:24,261 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,882 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 03:36:24,882 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 03:36:25,213 INFO [datamodule.py:388] (3/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-4 b/libritts/log/log-train-2024-08-06-03-36-20-4 new file mode 100644 index 0000000000000000000000000000000000000000..49e98666a5019ecfe2c7df005759b29f1a84367a --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-4 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,625 INFO [trainer.py:870] (4/8) Training started +2024-08-06 03:36:20,626 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 03:36:20,626 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 
'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,626 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 03:36:21,378 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,189 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 03:36:24,252 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 03:36:24,254 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 03:36:24,255 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 03:36:24,255 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 03:36:24,256 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,871 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 03:36:24,872 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 03:36:25,198 INFO [datamodule.py:388] (4/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-5 b/libritts/log/log-train-2024-08-06-03-36-20-5 new file mode 100644 index 0000000000000000000000000000000000000000..c1a3da0ea9df3c393260b5329f333e4ab6a9ad00 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-5 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,625 INFO [trainer.py:870] (5/8) Training started +2024-08-06 03:36:20,626 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 03:36:20,626 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 
100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,627 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 03:36:21,351 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,134 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 03:36:24,258 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 03:36:24,259 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 03:36:24,260 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 03:36:24,260 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 03:36:24,261 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,868 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 03:36:24,869 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 03:36:25,202 INFO [datamodule.py:388] (5/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-6 b/libritts/log/log-train-2024-08-06-03-36-20-6 new file mode 100644 index 0000000000000000000000000000000000000000..3441f1a9403587a4a0301a00620b02ef34c9c7b4 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-6 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,624 INFO [trainer.py:870] (6/8) Training started +2024-08-06 03:36:20,625 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 03:36:20,625 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 
'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,625 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 03:36:21,379 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,190 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 03:36:24,255 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 03:36:24,256 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 03:36:24,258 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 03:36:24,258 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 03:36:24,258 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,873 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 03:36:24,873 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 03:36:25,206 INFO [datamodule.py:388] (6/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-36-20-7 b/libritts/log/log-train-2024-08-06-03-36-20-7 new file mode 100644 index 0000000000000000000000000000000000000000..f6f18de9d4cf07fe8ff8d4a73829534ec1f360c1 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-36-20-7 @@ -0,0 +1,14 @@ +2024-08-06 03:36:20,582 INFO [trainer.py:870] (7/8) Training started +2024-08-06 03:36:20,583 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 03:36:20,583 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 
40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:36:20,584 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 03:36:21,333 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 03:36:22,159 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 03:36:24,257 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 03:36:24,258 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 03:36:24,259 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 03:36:24,259 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 03:36:24,260 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 03:36:24,866 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 03:36:24,867 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 03:36:25,192 INFO [datamodule.py:388] (7/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-0 b/libritts/log/log-train-2024-08-06-03-38-55-0 new file mode 100644 index 0000000000000000000000000000000000000000..9f0cd578adbcb233af5d4d0d48c40d5c428821f2 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-0 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,643 INFO [trainer.py:870] (0/8) Training started +2024-08-06 03:38:55,647 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 03:38:55,648 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 
'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,648 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 03:38:56,399 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,208 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 03:38:59,272 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 03:38:59,274 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 03:38:59,275 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 03:38:59,276 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,883 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 03:38:59,883 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 03:39:00,207 INFO [datamodule.py:388] (0/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-1 b/libritts/log/log-train-2024-08-06-03-38-55-1 new file mode 100644 index 0000000000000000000000000000000000000000..4910e3a4cfa5422dd20273333dc2a806ac0160b9 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-1 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,673 INFO [trainer.py:870] (1/8) Training started +2024-08-06 03:38:55,674 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 03:38:55,674 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 
'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,674 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 03:38:56,432 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,244 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 03:38:59,274 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 03:38:59,275 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 03:38:59,276 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,892 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 03:38:59,893 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 03:39:00,219 INFO [datamodule.py:388] (1/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-2 b/libritts/log/log-train-2024-08-06-03-38-55-2 new file mode 100644 index 0000000000000000000000000000000000000000..46aca48bc79c80998c4715cb5c383dcd3ac73a94 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-2 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,672 INFO [trainer.py:870] (2/8) Training started +2024-08-06 03:38:55,673 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 03:38:55,673 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': 
False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,673 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 03:38:56,433 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,236 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 03:38:59,275 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 03:38:59,276 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,881 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 03:38:59,881 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 03:39:00,207 INFO [datamodule.py:388] (2/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-3 b/libritts/log/log-train-2024-08-06-03-38-55-3 new file mode 100644 index 0000000000000000000000000000000000000000..b827944e863739e9204e2df1b406224286953704 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-3 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,674 INFO [trainer.py:870] (3/8) Training started +2024-08-06 03:38:55,675 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 03:38:55,675 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 
'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,675 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 03:38:56,423 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,230 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 03:38:59,276 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 03:38:59,276 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 03:38:59,277 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,884 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 03:38:59,885 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 03:39:00,214 INFO [datamodule.py:388] (3/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-4 b/libritts/log/log-train-2024-08-06-03-38-55-4 new file mode 100644 index 0000000000000000000000000000000000000000..00deaf3e0a139561f9e3cc0e12390e806df56b80 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-4 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,673 INFO [trainer.py:870] (4/8) Training started +2024-08-06 03:38:55,674 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 03:38:55,674 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 
'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,675 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 03:38:56,398 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,174 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 03:38:59,269 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 03:38:59,272 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 03:38:59,273 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 03:38:59,273 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 03:38:59,274 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,880 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 03:38:59,881 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 03:39:00,207 INFO [datamodule.py:388] (4/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-5 b/libritts/log/log-train-2024-08-06-03-38-55-5 new file mode 100644 index 0000000000000000000000000000000000000000..38ad04a8e10669b29b59df491dca9aea13ccf791 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-5 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,624 INFO [trainer.py:870] (5/8) Training started +2024-08-06 03:38:55,625 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 03:38:55,626 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 
'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,626 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 03:38:56,387 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,232 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 03:38:59,276 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 03:38:59,276 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 03:38:59,276 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,895 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 03:38:59,895 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 03:39:00,223 INFO [datamodule.py:388] (5/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-6 b/libritts/log/log-train-2024-08-06-03-38-55-6 new file mode 100644 index 0000000000000000000000000000000000000000..05a8af8088b307a0ed154b4995158dbdb714037d --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-6 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,651 INFO [trainer.py:870] (6/8) Training started +2024-08-06 03:38:55,652 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 03:38:55,652 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': 
False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,652 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 03:38:56,412 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,196 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 03:38:59,275 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 03:38:59,276 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,885 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 03:38:59,886 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 03:39:00,212 INFO [datamodule.py:388] (6/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-38-55-7 b/libritts/log/log-train-2024-08-06-03-38-55-7 new file mode 100644 index 0000000000000000000000000000000000000000..e8d2e876384e9e9d4522c69d691c34117472816f --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-38-55-7 @@ -0,0 +1,14 @@ +2024-08-06 03:38:55,625 INFO [trainer.py:870] (7/8) Training started +2024-08-06 03:38:55,626 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 03:38:55,626 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 
'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:38:55,626 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 03:38:56,368 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 03:38:57,198 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 03:38:59,273 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 03:38:59,275 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 03:38:59,276 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 03:38:59,276 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 03:38:59,277 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 03:38:59,884 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 03:38:59,884 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 03:39:00,212 INFO [datamodule.py:388] (7/8) About to create dev dataloader diff --git a/libritts/log/log-train-2024-08-06-03-39-40-0 b/libritts/log/log-train-2024-08-06-03-39-40-0 new file mode 100644 index 0000000000000000000000000000000000000000..49f2f0acd7592c048e7b1b7aec8c2dc14cbdc41d --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-0 @@ -0,0 +1,381 @@ +2024-08-06 03:39:40,356 INFO [trainer.py:870] (0/8) Training started +2024-08-06 03:39:40,361 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 03:39:40,361 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 
'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,361 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 03:39:41,123 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,894 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 03:39:44,001 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 03:39:44,003 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 03:39:44,004 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 03:39:44,004 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 03:39:44,004 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,636 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 03:39:44,637 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 03:39:44,977 INFO [datamodule.py:388] (0/8) About to create dev dataloader +2024-08-06 03:40:39,570 INFO [trainer.py:765] (0/8) Epoch 1, batch 100, train_loss[loss=4.115, ArTop10Accuracy=0.5062, over 14477.00 frames. ], tot_loss[loss=4.774, ArTop10Accuracy=0.3971, over 4773.61 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,921 INFO [trainer.py:765] (0/8) Epoch 1, batch 200, train_loss[loss=3.944, ArTop10Accuracy=0.5332, over 13991.00 frames. ], tot_loss[loss=4.286, ArTop10Accuracy=0.4784, over 7781.74 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,950 INFO [trainer.py:765] (0/8) Epoch 1, batch 300, train_loss[loss=3.794, ArTop10Accuracy=0.5671, over 14114.00 frames. ], tot_loss[loss=4.077, ArTop10Accuracy=0.5126, over 9422.32 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,079 INFO [trainer.py:765] (0/8) Epoch 1, batch 400, train_loss[loss=3.666, ArTop10Accuracy=0.5856, over 10228.00 frames. ], tot_loss[loss=3.938, ArTop10Accuracy=0.5357, over 10323.30 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (0/8) Epoch 1, batch 500, train_loss[loss=3.655, ArTop10Accuracy=0.5704, over 12481.00 frames. ], tot_loss[loss=3.83, ArTop10Accuracy=0.5534, over 10894.29 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,592 INFO [trainer.py:765] (0/8) Epoch 1, batch 600, train_loss[loss=3.507, ArTop10Accuracy=0.6035, over 11709.00 frames. ], tot_loss[loss=3.741, ArTop10Accuracy=0.5685, over 11416.88 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,900 INFO [trainer.py:765] (0/8) Epoch 1, batch 700, train_loss[loss=3.642, ArTop10Accuracy=0.5851, over 10365.00 frames. ], tot_loss[loss=3.687, ArTop10Accuracy=0.5778, over 11552.26 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (0/8) Epoch 1, batch 800, train_loss[loss=3.56, ArTop10Accuracy=0.5988, over 10184.00 frames. ], tot_loss[loss=3.636, ArTop10Accuracy=0.5871, over 11671.99 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,557 INFO [trainer.py:765] (0/8) Epoch 1, batch 900, train_loss[loss=3.628, ArTop10Accuracy=0.5887, over 13127.00 frames. ], tot_loss[loss=3.588, ArTop10Accuracy=0.5963, over 11724.75 frames. 
], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,649 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-1000.pt +2024-08-06 03:46:07,682 INFO [trainer.py:765] (0/8) Epoch 1, batch 1000, train_loss[loss=3.561, ArTop10Accuracy=0.6028, over 13088.00 frames. ], tot_loss[loss=3.555, ArTop10Accuracy=0.6025, over 11917.89 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,612 INFO [trainer.py:765] (0/8) Epoch 1, batch 1100, train_loss[loss=3.407, ArTop10Accuracy=0.628, over 13767.00 frames. ], tot_loss[loss=3.528, ArTop10Accuracy=0.6074, over 11986.86 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,745 INFO [trainer.py:765] (0/8) Epoch 1, batch 1200, train_loss[loss=3.476, ArTop10Accuracy=0.6176, over 12273.00 frames. ], tot_loss[loss=3.508, ArTop10Accuracy=0.6112, over 11918.03 frames. ], batch size: 98, lr: 2.96e-02 +2024-08-06 03:47:33,694 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 03:47:33,697 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-1.pt +2024-08-06 03:48:38,675 INFO [trainer.py:765] (0/8) Epoch 2, batch 100, train_loss[loss=3.512, ArTop10Accuracy=0.6097, over 14549.00 frames. ], tot_loss[loss=3.449, ArTop10Accuracy=0.6227, over 4791.40 frames. ], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,596 INFO [trainer.py:765] (0/8) Epoch 2, batch 200, train_loss[loss=3.348, ArTop10Accuracy=0.6383, over 13985.00 frames. ], tot_loss[loss=3.428, ArTop10Accuracy=0.6269, over 7797.72 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,519 INFO [trainer.py:765] (0/8) Epoch 2, batch 300, train_loss[loss=3.485, ArTop10Accuracy=0.6182, over 14325.00 frames. ], tot_loss[loss=3.418, ArTop10Accuracy=0.6287, over 9426.43 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:32,000 INFO [trainer.py:765] (0/8) Epoch 2, batch 400, train_loss[loss=3.38, ArTop10Accuracy=0.6332, over 10121.00 frames. ], tot_loss[loss=3.408, ArTop10Accuracy=0.6308, over 10355.26 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,109 INFO [trainer.py:765] (0/8) Epoch 2, batch 500, train_loss[loss=3.381, ArTop10Accuracy=0.6257, over 12257.00 frames. ], tot_loss[loss=3.401, ArTop10Accuracy=0.632, over 10944.40 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,202 INFO [trainer.py:765] (0/8) Epoch 2, batch 600, train_loss[loss=3.191, ArTop10Accuracy=0.6737, over 11680.00 frames. ], tot_loss[loss=3.397, ArTop10Accuracy=0.6329, over 11458.95 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,993 INFO [trainer.py:765] (0/8) Epoch 2, batch 700, train_loss[loss=3.445, ArTop10Accuracy=0.616, over 10029.00 frames. ], tot_loss[loss=3.389, ArTop10Accuracy=0.6345, over 11598.56 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,089 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-2000.pt +2024-08-06 03:52:50,254 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (0/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. 
+2024-08-06 03:52:56,024 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29280MB +2024-08-06 03:52:56,542 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,882 INFO [trainer.py:765] (0/8) Epoch 2, batch 800, train_loss[loss=3.419, ArTop10Accuracy=0.6281, over 10166.00 frames. ], tot_loss[loss=3.379, ArTop10Accuracy=0.6362, over 11712.71 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (0/8) Epoch 2, batch 900, train_loss[loss=3.226, ArTop10Accuracy=0.6545, over 12935.00 frames. ], tot_loss[loss=3.365, ArTop10Accuracy=0.6387, over 11751.69 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,809 INFO [trainer.py:765] (0/8) Epoch 2, batch 1000, train_loss[loss=3.293, ArTop10Accuracy=0.6561, over 13065.00 frames. ], tot_loss[loss=3.361, ArTop10Accuracy=0.6399, over 11949.49 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,006 INFO [trainer.py:765] (0/8) Epoch 2, batch 1100, train_loss[loss=3.227, ArTop10Accuracy=0.6616, over 13740.00 frames. ], tot_loss[loss=3.359, ArTop10Accuracy=0.6402, over 11992.45 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,229 INFO [trainer.py:765] (0/8) Epoch 2, batch 1200, train_loss[loss=3.337, ArTop10Accuracy=0.6505, over 12307.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.6413, over 11931.70 frames. ], batch size: 99, lr: 2.80e-02 +2024-08-06 03:55:51,293 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 03:55:51,296 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-2.pt +2024-08-06 03:57:04,102 INFO [trainer.py:765] (0/8) Epoch 3, batch 100, train_loss[loss=3.263, ArTop10Accuracy=0.6638, over 14527.00 frames. ], tot_loss[loss=3.307, ArTop10Accuracy=0.6505, over 4779.02 frames. ], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,980 INFO [trainer.py:765] (0/8) Epoch 3, batch 200, train_loss[loss=3.322, ArTop10Accuracy=0.6496, over 13716.00 frames. ], tot_loss[loss=3.295, ArTop10Accuracy=0.6534, over 7792.17 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,075 INFO [trainer.py:765] (0/8) Epoch 3, batch 300, train_loss[loss=3.167, ArTop10Accuracy=0.6692, over 14278.00 frames. ], tot_loss[loss=3.276, ArTop10Accuracy=0.6566, over 9431.63 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,253 INFO [trainer.py:765] (0/8) Epoch 3, batch 400, train_loss[loss=3.069, ArTop10Accuracy=0.6916, over 10973.00 frames. ], tot_loss[loss=3.269, ArTop10Accuracy=0.6584, over 10353.81 frames. ], batch size: 15, lr: 2.63e-02 +2024-08-06 03:59:26,365 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-3000.pt +2024-08-06 03:59:29,674 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,303 INFO [trainer.py:765] (0/8) Epoch 3, batch 500, train_loss[loss=3.139, ArTop10Accuracy=0.6852, over 12539.00 frames. ], tot_loss[loss=3.258, ArTop10Accuracy=0.6609, over 10927.22 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,096 INFO [trainer.py:765] (0/8) Epoch 3, batch 600, train_loss[loss=3.245, ArTop10Accuracy=0.6688, over 11633.00 frames. ], tot_loss[loss=3.248, ArTop10Accuracy=0.6623, over 11458.63 frames. 
], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,058 INFO [trainer.py:765] (0/8) Epoch 3, batch 700, train_loss[loss=3.032, ArTop10Accuracy=0.7001, over 10188.00 frames. ], tot_loss[loss=3.241, ArTop10Accuracy=0.6634, over 11600.56 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,269 INFO [trainer.py:765] (0/8) Epoch 3, batch 800, train_loss[loss=3.122, ArTop10Accuracy=0.6856, over 10334.00 frames. ], tot_loss[loss=3.225, ArTop10Accuracy=0.6665, over 11685.71 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,741 INFO [trainer.py:765] (0/8) Epoch 3, batch 900, train_loss[loss=3.193, ArTop10Accuracy=0.6785, over 12852.00 frames. ], tot_loss[loss=3.204, ArTop10Accuracy=0.6708, over 11736.38 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,283 INFO [trainer.py:765] (0/8) Epoch 3, batch 1000, train_loss[loss=3.144, ArTop10Accuracy=0.6877, over 13191.00 frames. ], tot_loss[loss=3.2, ArTop10Accuracy=0.6719, over 11929.19 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (0/8) Epoch 3, batch 1100, train_loss[loss=3.19, ArTop10Accuracy=0.6685, over 13637.00 frames. ], tot_loss[loss=3.192, ArTop10Accuracy=0.6736, over 11995.14 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,312 INFO [trainer.py:765] (0/8) Epoch 3, batch 1200, train_loss[loss=3.308, ArTop10Accuracy=0.6544, over 12230.00 frames. ], tot_loss[loss=3.183, ArTop10Accuracy=0.6752, over 11923.50 frames. ], batch size: 97, lr: 2.54e-02 +2024-08-06 04:04:26,675 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:04:26,677 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-3.pt +2024-08-06 04:05:43,369 INFO [trainer.py:765] (0/8) Epoch 4, batch 100, train_loss[loss=3.056, ArTop10Accuracy=0.6973, over 14180.00 frames. ], tot_loss[loss=3.14, ArTop10Accuracy=0.6851, over 4786.19 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,078 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-4000.pt +2024-08-06 04:06:10,595 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. +2024-08-06 04:06:16,404 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29681MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,826 INFO [trainer.py:765] (0/8) Epoch 4, batch 200, train_loss[loss=3.064, ArTop10Accuracy=0.7017, over 13834.00 frames. ], tot_loss[loss=3.13, ArTop10Accuracy=0.6866, over 7801.49 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,545 INFO [trainer.py:765] (0/8) Epoch 4, batch 300, train_loss[loss=3.176, ArTop10Accuracy=0.6745, over 14344.00 frames. ], tot_loss[loss=3.121, ArTop10Accuracy=0.6879, over 9440.54 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (0/8) Epoch 4, batch 400, train_loss[loss=3.104, ArTop10Accuracy=0.6971, over 11013.00 frames. ], tot_loss[loss=3.115, ArTop10Accuracy=0.6889, over 10362.70 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,344 INFO [trainer.py:765] (0/8) Epoch 4, batch 500, train_loss[loss=2.908, ArTop10Accuracy=0.7181, over 12354.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6894, over 10930.04 frames. 
], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (0/8) Epoch 4, batch 600, train_loss[loss=3.091, ArTop10Accuracy=0.697, over 11648.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.6895, over 11445.30 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,501 INFO [trainer.py:765] (0/8) Epoch 4, batch 700, train_loss[loss=2.829, ArTop10Accuracy=0.7409, over 10140.00 frames. ], tot_loss[loss=3.115, ArTop10Accuracy=0.6885, over 11581.61 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,959 INFO [trainer.py:765] (0/8) Epoch 4, batch 800, train_loss[loss=3.144, ArTop10Accuracy=0.6847, over 10271.00 frames. ], tot_loss[loss=3.121, ArTop10Accuracy=0.6873, over 11687.48 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,330 INFO [trainer.py:765] (0/8) Epoch 4, batch 900, train_loss[loss=2.983, ArTop10Accuracy=0.712, over 12966.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.6906, over 11732.98 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (0/8) Epoch 4, batch 1000, train_loss[loss=3.082, ArTop10Accuracy=0.6988, over 12857.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.691, over 11938.75 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (0/8) Epoch 4, batch 1100, train_loss[loss=3.137, ArTop10Accuracy=0.683, over 13604.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6909, over 11980.29 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:45,699 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-5000.pt +2024-08-06 04:12:48,544 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,828 INFO [trainer.py:765] (0/8) Epoch 4, batch 1200, train_loss[loss=3.254, ArTop10Accuracy=0.6703, over 12869.00 frames. ], tot_loss[loss=3.102, ArTop10Accuracy=0.6914, over 11925.65 frames. ], batch size: 97, lr: 2.25e-02 +2024-08-06 04:13:23,893 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:13:23,895 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-4.pt +2024-08-06 04:14:38,685 INFO [trainer.py:765] (0/8) Epoch 5, batch 100, train_loss[loss=3.128, ArTop10Accuracy=0.6899, over 14565.00 frames. ], tot_loss[loss=3.067, ArTop10Accuracy=0.699, over 4772.29 frames. ], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (0/8) Epoch 5, batch 200, train_loss[loss=2.968, ArTop10Accuracy=0.719, over 13647.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7015, over 7783.15 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (0/8) Epoch 5, batch 300, train_loss[loss=3.154, ArTop10Accuracy=0.6844, over 14238.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7015, over 9403.87 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,133 INFO [trainer.py:765] (0/8) Epoch 5, batch 400, train_loss[loss=3.091, ArTop10Accuracy=0.6908, over 10385.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7008, over 10321.36 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (0/8) Epoch 5, batch 500, train_loss[loss=3.144, ArTop10Accuracy=0.6915, over 12162.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7013, over 10879.21 frames. 
], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,113 INFO [trainer.py:765] (0/8) Epoch 5, batch 600, train_loss[loss=3.041, ArTop10Accuracy=0.7091, over 11478.00 frames. ], tot_loss[loss=3.058, ArTop10Accuracy=0.6998, over 11418.18 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,033 INFO [trainer.py:765] (0/8) Epoch 5, batch 700, train_loss[loss=3.009, ArTop10Accuracy=0.7107, over 10038.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6994, over 11585.15 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (0/8) Epoch 5, batch 800, train_loss[loss=3.071, ArTop10Accuracy=0.6936, over 10125.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6995, over 11685.86 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-6000.pt +2024-08-06 04:20:21,954 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 04:20:27,475 INFO [trainer.py:811] (0/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (0/8) Epoch 5, batch 900, train_loss[loss=3.019, ArTop10Accuracy=0.709, over 13026.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7007, over 11740.81 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (0/8) Epoch 5, batch 1000, train_loss[loss=3.032, ArTop10Accuracy=0.7024, over 12918.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.701, over 11956.52 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,451 INFO [trainer.py:765] (0/8) Epoch 5, batch 1100, train_loss[loss=2.969, ArTop10Accuracy=0.7173, over 13590.00 frames. ], tot_loss[loss=3.055, ArTop10Accuracy=0.7007, over 12007.67 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (0/8) Epoch 5, batch 1200, train_loss[loss=3.204, ArTop10Accuracy=0.6734, over 12969.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7002, over 11947.57 frames. ], batch size: 99, lr: 1.99e-02 +2024-08-06 04:22:30,431 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:22:30,435 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-5.pt +2024-08-06 04:23:46,282 INFO [trainer.py:765] (0/8) Epoch 6, batch 100, train_loss[loss=3.133, ArTop10Accuracy=0.6927, over 14358.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7093, over 4797.85 frames. ], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,256 INFO [trainer.py:765] (0/8) Epoch 6, batch 200, train_loss[loss=3.1, ArTop10Accuracy=0.6943, over 13607.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7099, over 7796.71 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,676 INFO [trainer.py:765] (0/8) Epoch 6, batch 300, train_loss[loss=3.032, ArTop10Accuracy=0.706, over 14227.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.709, over 9423.91 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (0/8) Epoch 6, batch 400, train_loss[loss=2.856, ArTop10Accuracy=0.7436, over 10157.00 frames. ], tot_loss[loss=3.016, ArTop10Accuracy=0.7092, over 10328.64 frames. 
], batch size: 14, lr: 1.83e-02 +2024-08-06 04:26:51,485 INFO [trainer.py:765] (0/8) Epoch 6, batch 500, train_loss[loss=2.92, ArTop10Accuracy=0.73, over 12394.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7096, over 10896.45 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (0/8) Epoch 6, batch 600, train_loss[loss=3.119, ArTop10Accuracy=0.6874, over 11516.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7085, over 11416.22 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:41,654 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-7000.pt +2024-08-06 04:27:46,369 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,240 INFO [trainer.py:765] (0/8) Epoch 6, batch 700, train_loss[loss=3.091, ArTop10Accuracy=0.6931, over 10181.00 frames. ], tot_loss[loss=3.022, ArTop10Accuracy=0.7079, over 11583.44 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (0/8) Epoch 6, batch 800, train_loss[loss=2.962, ArTop10Accuracy=0.7174, over 10098.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.7068, over 11703.27 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,751 INFO [trainer.py:765] (0/8) Epoch 6, batch 900, train_loss[loss=2.938, ArTop10Accuracy=0.7197, over 13251.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7069, over 11757.31 frames. ], batch size: 28, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (0/8) Epoch 6, batch 1000, train_loss[loss=3.008, ArTop10Accuracy=0.7192, over 12801.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7069, over 11964.72 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,383 INFO [trainer.py:765] (0/8) Epoch 6, batch 1100, train_loss[loss=3.029, ArTop10Accuracy=0.703, over 13928.00 frames. ], tot_loss[loss=3.03, ArTop10Accuracy=0.7059, over 11998.77 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,674 INFO [trainer.py:765] (0/8) Epoch 6, batch 1200, train_loss[loss=3.148, ArTop10Accuracy=0.6807, over 13222.00 frames. ], tot_loss[loss=3.03, ArTop10Accuracy=0.7057, over 11968.48 frames. ], batch size: 98, lr: 1.76e-02 +2024-08-06 04:31:40,595 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:31:40,598 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-6.pt +2024-08-06 04:32:52,405 INFO [trainer.py:765] (0/8) Epoch 7, batch 100, train_loss[loss=3.019, ArTop10Accuracy=0.7107, over 14649.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.7165, over 4784.46 frames. ], batch size: 62, lr: 1.64e-02 +2024-08-06 04:33:38,223 INFO [trainer.py:765] (0/8) Epoch 7, batch 200, train_loss[loss=2.923, ArTop10Accuracy=0.7315, over 13779.00 frames. ], tot_loss[loss=2.983, ArTop10Accuracy=0.7158, over 7796.21 frames. ], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (0/8) Epoch 7, batch 300, train_loss[loss=3.033, ArTop10Accuracy=0.7063, over 14454.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7162, over 9419.13 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,847 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-8000.pt +2024-08-06 04:34:39,927 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 04:34:45,809 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. 
+2024-08-06 04:34:45,809 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 04:34:46,124 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,147 INFO [trainer.py:765] (0/8) Epoch 7, batch 400, train_loss[loss=3.007, ArTop10Accuracy=0.7141, over 10939.00 frames. ], tot_loss[loss=2.984, ArTop10Accuracy=0.7154, over 10333.36 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (0/8) Epoch 7, batch 500, train_loss[loss=3.046, ArTop10Accuracy=0.7014, over 12358.00 frames. ], tot_loss[loss=2.983, ArTop10Accuracy=0.7154, over 10900.99 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,812 INFO [trainer.py:765] (0/8) Epoch 7, batch 600, train_loss[loss=2.922, ArTop10Accuracy=0.7226, over 11642.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7138, over 11416.97 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (0/8) Epoch 7, batch 700, train_loss[loss=2.882, ArTop10Accuracy=0.7363, over 10099.00 frames. ], tot_loss[loss=2.993, ArTop10Accuracy=0.7129, over 11576.53 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,614 INFO [trainer.py:765] (0/8) Epoch 7, batch 800, train_loss[loss=3.023, ArTop10Accuracy=0.7031, over 10117.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7118, over 11686.69 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (0/8) Epoch 7, batch 900, train_loss[loss=2.892, ArTop10Accuracy=0.7333, over 12890.00 frames. ], tot_loss[loss=2.985, ArTop10Accuracy=0.714, over 11734.77 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,575 INFO [trainer.py:765] (0/8) Epoch 7, batch 1000, train_loss[loss=2.88, ArTop10Accuracy=0.7392, over 12922.00 frames. ], tot_loss[loss=2.992, ArTop10Accuracy=0.7132, over 11960.08 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (0/8) Epoch 7, batch 1100, train_loss[loss=3.04, ArTop10Accuracy=0.7042, over 13794.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7125, over 11992.82 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,990 INFO [trainer.py:765] (0/8) Epoch 7, batch 1200, train_loss[loss=3.169, ArTop10Accuracy=0.6834, over 11938.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.7122, over 11941.88 frames. ], batch size: 99, lr: 1.57e-02 +2024-08-06 04:40:43,386 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:40:43,389 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-7.pt +2024-08-06 04:41:34,479 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-9000.pt +2024-08-06 04:41:37,491 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,371 INFO [trainer.py:765] (0/8) Epoch 8, batch 100, train_loss[loss=2.969, ArTop10Accuracy=0.7203, over 14773.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7197, over 4791.07 frames. ], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (0/8) Epoch 8, batch 200, train_loss[loss=2.851, ArTop10Accuracy=0.7433, over 13565.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7191, over 7794.06 frames. 
], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (0/8) Epoch 8, batch 300, train_loss[loss=3.05, ArTop10Accuracy=0.7073, over 14269.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7203, over 9405.56 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,461 INFO [trainer.py:765] (0/8) Epoch 8, batch 400, train_loss[loss=3.076, ArTop10Accuracy=0.6892, over 10975.00 frames. ], tot_loss[loss=2.966, ArTop10Accuracy=0.7194, over 10330.46 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (0/8) Epoch 8, batch 500, train_loss[loss=2.887, ArTop10Accuracy=0.7299, over 12258.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7199, over 10893.93 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (0/8) Epoch 8, batch 600, train_loss[loss=2.882, ArTop10Accuracy=0.7336, over 11537.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7193, over 11429.99 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,038 INFO [trainer.py:765] (0/8) Epoch 8, batch 700, train_loss[loss=2.913, ArTop10Accuracy=0.7296, over 9320.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7189, over 11568.50 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 04:47:10,207 INFO [trainer.py:765] (0/8) Epoch 8, batch 800, train_loss[loss=2.918, ArTop10Accuracy=0.7321, over 10097.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7178, over 11680.89 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,605 INFO [trainer.py:765] (0/8) Epoch 8, batch 900, train_loss[loss=2.832, ArTop10Accuracy=0.7399, over 12865.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7193, over 11730.79 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,033 INFO [trainer.py:765] (0/8) Epoch 8, batch 1000, train_loss[loss=2.932, ArTop10Accuracy=0.7198, over 12777.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7179, over 11920.09 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,826 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-10000.pt +2024-08-06 04:48:31,904 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (0/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,932 INFO [trainer.py:765] (0/8) Epoch 8, batch 1100, train_loss[loss=2.994, ArTop10Accuracy=0.7165, over 13708.00 frames. ], tot_loss[loss=2.977, ArTop10Accuracy=0.7162, over 11999.59 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (0/8) Epoch 8, batch 1200, train_loss[loss=3.071, ArTop10Accuracy=0.6969, over 12970.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7159, over 11928.39 frames. ], batch size: 97, lr: 1.40e-02 +2024-08-06 04:49:49,140 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:49:49,143 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-8.pt +2024-08-06 04:51:01,547 INFO [trainer.py:765] (0/8) Epoch 9, batch 100, train_loss[loss=2.966, ArTop10Accuracy=0.7242, over 14839.00 frames. ], tot_loss[loss=2.942, ArTop10Accuracy=0.7244, over 4775.07 frames. 
], batch size: 62, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (0/8) Epoch 9, batch 200, train_loss[loss=3.012, ArTop10Accuracy=0.7159, over 13736.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7251, over 7778.10 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (0/8) Epoch 9, batch 300, train_loss[loss=2.872, ArTop10Accuracy=0.7323, over 14581.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.7253, over 9412.20 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (0/8) Epoch 9, batch 400, train_loss[loss=2.944, ArTop10Accuracy=0.721, over 11103.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.725, over 10325.61 frames. ], batch size: 15, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (0/8) Epoch 9, batch 500, train_loss[loss=3.011, ArTop10Accuracy=0.7143, over 12100.00 frames. ], tot_loss[loss=2.943, ArTop10Accuracy=0.7242, over 10915.61 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (0/8) Epoch 9, batch 600, train_loss[loss=2.959, ArTop10Accuracy=0.716, over 11554.00 frames. ], tot_loss[loss=2.941, ArTop10Accuracy=0.7236, over 11424.40 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,400 INFO [trainer.py:765] (0/8) Epoch 9, batch 700, train_loss[loss=2.819, ArTop10Accuracy=0.7445, over 9911.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.722, over 11549.12 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:01,421 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-11000.pt +2024-08-06 04:56:04,573 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,597 INFO [trainer.py:765] (0/8) Epoch 9, batch 800, train_loss[loss=2.794, ArTop10Accuracy=0.756, over 10111.00 frames. ], tot_loss[loss=2.954, ArTop10Accuracy=0.7212, over 11662.03 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (0/8) Epoch 9, batch 900, train_loss[loss=2.956, ArTop10Accuracy=0.7281, over 12998.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7224, over 11722.20 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (0/8) Epoch 9, batch 1000, train_loss[loss=2.988, ArTop10Accuracy=0.711, over 13043.00 frames. ], tot_loss[loss=2.952, ArTop10Accuracy=0.7215, over 11938.11 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,657 INFO [trainer.py:765] (0/8) Epoch 9, batch 1100, train_loss[loss=3.061, ArTop10Accuracy=0.6999, over 13675.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.7204, over 11999.12 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,094 INFO [trainer.py:765] (0/8) Epoch 9, batch 1200, train_loss[loss=3.116, ArTop10Accuracy=0.6897, over 12997.00 frames. ], tot_loss[loss=2.955, ArTop10Accuracy=0.7208, over 11961.99 frames. ], batch size: 99, lr: 1.27e-02 +2024-08-06 04:58:43,366 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 04:58:43,369 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-9.pt +2024-08-06 04:59:52,749 INFO [trainer.py:765] (0/8) Epoch 10, batch 100, train_loss[loss=2.958, ArTop10Accuracy=0.7209, over 14615.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7278, over 4781.40 frames. 
], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,729 INFO [trainer.py:765] (0/8) Epoch 10, batch 200, train_loss[loss=2.939, ArTop10Accuracy=0.7309, over 13773.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7284, over 7792.17 frames. ], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,592 INFO [trainer.py:765] (0/8) Epoch 10, batch 300, train_loss[loss=2.999, ArTop10Accuracy=0.712, over 14139.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7294, over 9407.34 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (0/8) Epoch 10, batch 400, train_loss[loss=2.946, ArTop10Accuracy=0.7272, over 10816.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7286, over 10322.17 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,487 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-12000.pt +2024-08-06 05:02:49,613 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 05:02:55,377 INFO [trainer.py:811] (0/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (0/8) Epoch 10, batch 500, train_loss[loss=2.952, ArTop10Accuracy=0.7206, over 12376.00 frames. ], tot_loss[loss=2.917, ArTop10Accuracy=0.7287, over 10889.85 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (0/8) Epoch 10, batch 600, train_loss[loss=2.982, ArTop10Accuracy=0.7126, over 11661.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7269, over 11429.77 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (0/8) Epoch 10, batch 700, train_loss[loss=2.898, ArTop10Accuracy=0.7335, over 9294.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7257, over 11585.14 frames. ], batch size: 11, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (0/8) Epoch 10, batch 800, train_loss[loss=2.655, ArTop10Accuracy=0.7708, over 9951.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7254, over 11695.16 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (0/8) Epoch 10, batch 900, train_loss[loss=2.921, ArTop10Accuracy=0.7253, over 13015.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7267, over 11744.64 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,843 INFO [trainer.py:765] (0/8) Epoch 10, batch 1000, train_loss[loss=2.892, ArTop10Accuracy=0.7333, over 13293.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7253, over 11946.61 frames. ], batch size: 28, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (0/8) Epoch 10, batch 1100, train_loss[loss=2.847, ArTop10Accuracy=0.7367, over 13715.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7245, over 12011.58 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,484 INFO [trainer.py:765] (0/8) Epoch 10, batch 1200, train_loss[loss=3.095, ArTop10Accuracy=0.6982, over 12601.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7243, over 11964.58 frames. ], batch size: 99, lr: 1.16e-02 +2024-08-06 05:07:40,412 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
+2024-08-06 05:07:40,415 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-10.pt +2024-08-06 05:08:52,966 INFO [trainer.py:765] (0/8) Epoch 11, batch 100, train_loss[loss=2.961, ArTop10Accuracy=0.7253, over 14781.00 frames. ], tot_loss[loss=2.907, ArTop10Accuracy=0.7314, over 4781.48 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,278 INFO [trainer.py:765] (0/8) Epoch 11, batch 200, train_loss[loss=2.871, ArTop10Accuracy=0.7337, over 13697.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7323, over 7786.93 frames. ], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:48,259 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-13000.pt +2024-08-06 05:09:51,175 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,721 INFO [trainer.py:765] (0/8) Epoch 11, batch 300, train_loss[loss=3.031, ArTop10Accuracy=0.7094, over 14213.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7329, over 9403.86 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (0/8) Epoch 11, batch 400, train_loss[loss=2.805, ArTop10Accuracy=0.7457, over 11071.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7318, over 10327.46 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (0/8) Epoch 11, batch 500, train_loss[loss=2.807, ArTop10Accuracy=0.7449, over 12207.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7324, over 10884.53 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,288 INFO [trainer.py:765] (0/8) Epoch 11, batch 600, train_loss[loss=2.794, ArTop10Accuracy=0.7465, over 11657.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7317, over 11423.62 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,709 INFO [trainer.py:765] (0/8) Epoch 11, batch 700, train_loss[loss=2.926, ArTop10Accuracy=0.7354, over 9995.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7292, over 11567.13 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,207 INFO [trainer.py:765] (0/8) Epoch 11, batch 800, train_loss[loss=2.831, ArTop10Accuracy=0.7404, over 10248.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7279, over 11678.48 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,668 INFO [trainer.py:765] (0/8) Epoch 11, batch 900, train_loss[loss=2.947, ArTop10Accuracy=0.7274, over 13060.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7292, over 11736.16 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,264 INFO [trainer.py:765] (0/8) Epoch 11, batch 1000, train_loss[loss=2.848, ArTop10Accuracy=0.7434, over 12927.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7282, over 11933.03 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,260 INFO [trainer.py:765] (0/8) Epoch 11, batch 1100, train_loss[loss=2.958, ArTop10Accuracy=0.7163, over 13775.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7271, over 11979.36 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,499 INFO [trainer.py:765] (0/8) Epoch 11, batch 1200, train_loss[loss=3.093, ArTop10Accuracy=0.6956, over 11746.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7269, over 11943.88 frames. 
], batch size: 98, lr: 1.06e-02 +2024-08-06 05:16:12,697 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-14000.pt +2024-08-06 05:16:15,740 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 05:16:21,622 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,750 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 05:16:42,754 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-11.pt +2024-08-06 05:18:03,005 INFO [trainer.py:765] (0/8) Epoch 12, batch 100, train_loss[loss=2.927, ArTop10Accuracy=0.7272, over 14269.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7349, over 4792.73 frames. ], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,004 INFO [trainer.py:765] (0/8) Epoch 12, batch 200, train_loss[loss=2.944, ArTop10Accuracy=0.7207, over 13567.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7351, over 7793.59 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (0/8) Epoch 12, batch 300, train_loss[loss=2.915, ArTop10Accuracy=0.7271, over 14300.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7358, over 9416.87 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (0/8) Epoch 12, batch 400, train_loss[loss=2.867, ArTop10Accuracy=0.7323, over 10417.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7349, over 10344.80 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (0/8) Epoch 12, batch 500, train_loss[loss=2.862, ArTop10Accuracy=0.7405, over 12390.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7344, over 10925.04 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,915 INFO [trainer.py:765] (0/8) Epoch 12, batch 600, train_loss[loss=2.917, ArTop10Accuracy=0.7277, over 12081.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7339, over 11450.54 frames. ], batch size: 19, lr: 9.96e-03 +2024-08-06 05:22:32,206 INFO [trainer.py:765] (0/8) Epoch 12, batch 700, train_loss[loss=2.869, ArTop10Accuracy=0.7396, over 9986.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7318, over 11571.92 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,911 INFO [trainer.py:765] (0/8) Epoch 12, batch 800, train_loss[loss=2.894, ArTop10Accuracy=0.7277, over 10152.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7313, over 11684.18 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (0/8) Epoch 12, batch 900, train_loss[loss=2.897, ArTop10Accuracy=0.731, over 13401.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7337, over 11754.03 frames. ], batch size: 28, lr: 9.87e-03 +2024-08-06 05:23:51,883 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-15000.pt +2024-08-06 05:23:54,576 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,345 INFO [trainer.py:765] (0/8) Epoch 12, batch 1000, train_loss[loss=2.754, ArTop10Accuracy=0.7565, over 12959.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7331, over 11947.69 frames. 
], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,501 INFO [trainer.py:765] (0/8) Epoch 12, batch 1100, train_loss[loss=2.945, ArTop10Accuracy=0.7268, over 13596.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.731, over 11995.15 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,881 INFO [trainer.py:765] (0/8) Epoch 12, batch 1200, train_loss[loss=3.04, ArTop10Accuracy=0.7013, over 12046.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7301, over 11944.18 frames. ], batch size: 98, lr: 9.78e-03 +2024-08-06 05:25:40,807 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 05:25:40,811 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-12.pt +2024-08-06 05:26:46,787 INFO [trainer.py:765] (0/8) Epoch 13, batch 100, train_loss[loss=2.926, ArTop10Accuracy=0.7308, over 14510.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7383, over 4779.02 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,553 INFO [trainer.py:765] (0/8) Epoch 13, batch 200, train_loss[loss=2.903, ArTop10Accuracy=0.7277, over 13742.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7386, over 7793.82 frames. ], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (0/8) Epoch 13, batch 300, train_loss[loss=2.966, ArTop10Accuracy=0.714, over 14193.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7383, over 9442.19 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (0/8) Epoch 13, batch 400, train_loss[loss=2.841, ArTop10Accuracy=0.7461, over 10234.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7381, over 10371.04 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (0/8) Epoch 13, batch 500, train_loss[loss=2.821, ArTop10Accuracy=0.7487, over 12354.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7386, over 10918.12 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,247 INFO [trainer.py:765] (0/8) Epoch 13, batch 600, train_loss[loss=2.837, ArTop10Accuracy=0.746, over 11500.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7371, over 11428.61 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-16000.pt +2024-08-06 05:31:01,160 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (0/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (0/8) Epoch 13, batch 700, train_loss[loss=2.64, ArTop10Accuracy=0.7729, over 10296.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7363, over 11548.15 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (0/8) Epoch 13, batch 800, train_loss[loss=2.782, ArTop10Accuracy=0.753, over 10040.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.735, over 11660.34 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,521 INFO [trainer.py:765] (0/8) Epoch 13, batch 900, train_loss[loss=2.874, ArTop10Accuracy=0.736, over 12905.00 frames. ], tot_loss[loss=2.879, ArTop10Accuracy=0.7363, over 11710.03 frames. 
], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (0/8) Epoch 13, batch 1000, train_loss[loss=2.811, ArTop10Accuracy=0.7524, over 13056.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7346, over 11935.03 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,232 INFO [trainer.py:765] (0/8) Epoch 13, batch 1100, train_loss[loss=2.976, ArTop10Accuracy=0.7186, over 13991.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7331, over 11998.86 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (0/8) Epoch 13, batch 1200, train_loss[loss=3.129, ArTop10Accuracy=0.6866, over 12041.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7331, over 11937.14 frames. ], batch size: 97, lr: 9.07e-03 +2024-08-06 05:34:29,796 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 05:34:29,802 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-13.pt +2024-08-06 05:35:39,198 INFO [trainer.py:765] (0/8) Epoch 14, batch 100, train_loss[loss=2.972, ArTop10Accuracy=0.7162, over 14273.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7393, over 4770.11 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,063 INFO [trainer.py:765] (0/8) Epoch 14, batch 200, train_loss[loss=2.915, ArTop10Accuracy=0.728, over 13756.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7411, over 7792.21 frames. ], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (0/8) Epoch 14, batch 300, train_loss[loss=2.876, ArTop10Accuracy=0.7367, over 14623.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.742, over 9429.87 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:43,059 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-17000.pt +2024-08-06 05:37:46,029 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,138 INFO [trainer.py:765] (0/8) Epoch 14, batch 400, train_loss[loss=2.872, ArTop10Accuracy=0.7444, over 10346.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7407, over 10345.81 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (0/8) Epoch 14, batch 500, train_loss[loss=2.799, ArTop10Accuracy=0.7546, over 12241.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7405, over 10894.02 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,374 INFO [trainer.py:765] (0/8) Epoch 14, batch 600, train_loss[loss=2.745, ArTop10Accuracy=0.7663, over 11480.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7394, over 11401.19 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (0/8) Epoch 14, batch 700, train_loss[loss=2.941, ArTop10Accuracy=0.7243, over 10235.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7379, over 11560.07 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,135 INFO [trainer.py:765] (0/8) Epoch 14, batch 800, train_loss[loss=2.794, ArTop10Accuracy=0.7562, over 10051.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7371, over 11692.18 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,466 INFO [trainer.py:765] (0/8) Epoch 14, batch 900, train_loss[loss=3.02, ArTop10Accuracy=0.7158, over 12967.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7373, over 11731.37 frames. 
], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,995 INFO [trainer.py:765] (0/8) Epoch 14, batch 1000, train_loss[loss=2.853, ArTop10Accuracy=0.7427, over 12793.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7366, over 11927.26 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,216 INFO [trainer.py:765] (0/8) Epoch 14, batch 1100, train_loss[loss=2.877, ArTop10Accuracy=0.7408, over 13594.00 frames. ], tot_loss[loss=2.883, ArTop10Accuracy=0.7357, over 11998.57 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,548 INFO [trainer.py:765] (0/8) Epoch 14, batch 1200, train_loss[loss=3.008, ArTop10Accuracy=0.7143, over 12112.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.736, over 11938.39 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:18,778 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 05:43:18,781 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-14.pt +2024-08-06 05:44:28,572 INFO [trainer.py:765] (0/8) Epoch 15, batch 100, train_loss[loss=2.976, ArTop10Accuracy=0.7179, over 14284.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7428, over 4799.60 frames. ], batch size: 61, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-18000.pt +2024-08-06 05:44:32,247 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 05:44:38,023 INFO [trainer.py:811] (0/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. +2024-08-06 05:44:38,024 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,184 INFO [trainer.py:765] (0/8) Epoch 15, batch 200, train_loss[loss=2.777, ArTop10Accuracy=0.7586, over 13661.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7436, over 7799.16 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (0/8) Epoch 15, batch 300, train_loss[loss=2.952, ArTop10Accuracy=0.7216, over 14181.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7438, over 9411.69 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (0/8) Epoch 15, batch 400, train_loss[loss=2.744, ArTop10Accuracy=0.7581, over 11069.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7435, over 10330.54 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (0/8) Epoch 15, batch 500, train_loss[loss=2.844, ArTop10Accuracy=0.7437, over 12380.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7444, over 10912.75 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (0/8) Epoch 15, batch 600, train_loss[loss=2.821, ArTop10Accuracy=0.7484, over 11770.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7425, over 11446.31 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,855 INFO [trainer.py:765] (0/8) Epoch 15, batch 700, train_loss[loss=2.719, ArTop10Accuracy=0.7657, over 9994.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7416, over 11574.34 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,778 INFO [trainer.py:765] (0/8) Epoch 15, batch 800, train_loss[loss=2.705, ArTop10Accuracy=0.7624, over 10154.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7397, over 11692.18 frames. 
], batch size: 12, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (0/8) Epoch 15, batch 900, train_loss[loss=2.784, ArTop10Accuracy=0.7587, over 12947.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7417, over 11739.90 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (0/8) Epoch 15, batch 1000, train_loss[loss=2.884, ArTop10Accuracy=0.7331, over 13346.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7404, over 11938.83 frames. ], batch size: 28, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (0/8) Epoch 15, batch 1100, train_loss[loss=2.742, ArTop10Accuracy=0.7603, over 13562.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7386, over 11992.08 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:20,660 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-19000.pt +2024-08-06 05:51:23,514 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (0/8) Epoch 15, batch 1200, train_loss[loss=2.987, ArTop10Accuracy=0.7103, over 12660.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7375, over 11952.32 frames. ], batch size: 97, lr: 7.91e-03 +2024-08-06 05:52:18,231 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 05:52:18,235 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-15.pt +2024-08-06 05:53:29,263 INFO [trainer.py:765] (0/8) Epoch 16, batch 100, train_loss[loss=2.786, ArTop10Accuracy=0.7581, over 14670.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7457, over 4787.17 frames. ], batch size: 62, lr: 7.63e-03 +2024-08-06 05:54:12,878 INFO [trainer.py:765] (0/8) Epoch 16, batch 200, train_loss[loss=2.788, ArTop10Accuracy=0.7446, over 13685.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.746, over 7782.81 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (0/8) Epoch 16, batch 300, train_loss[loss=2.858, ArTop10Accuracy=0.7479, over 14147.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7455, over 9396.12 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,931 INFO [trainer.py:765] (0/8) Epoch 16, batch 400, train_loss[loss=2.62, ArTop10Accuracy=0.7767, over 11460.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7468, over 10327.04 frames. ], batch size: 16, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (0/8) Epoch 16, batch 500, train_loss[loss=2.753, ArTop10Accuracy=0.7555, over 12118.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7463, over 10902.66 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,440 INFO [trainer.py:765] (0/8) Epoch 16, batch 600, train_loss[loss=2.775, ArTop10Accuracy=0.7635, over 11534.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7452, over 11429.74 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (0/8) Epoch 16, batch 700, train_loss[loss=2.576, ArTop10Accuracy=0.784, over 9930.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7444, over 11584.59 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (0/8) Epoch 16, batch 800, train_loss[loss=2.774, ArTop10Accuracy=0.7459, over 10149.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7424, over 11688.61 frames. 
], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,568 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-20000.pt +2024-08-06 05:58:44,584 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (0/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,426 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (0/8) Epoch 16, batch 900, train_loss[loss=2.881, ArTop10Accuracy=0.7322, over 12900.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7434, over 11731.10 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (0/8) Epoch 16, batch 1000, train_loss[loss=2.944, ArTop10Accuracy=0.7268, over 12922.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7421, over 11938.72 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,091 INFO [trainer.py:765] (0/8) Epoch 16, batch 1100, train_loss[loss=2.844, ArTop10Accuracy=0.7466, over 13655.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7411, over 11990.87 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,464 INFO [trainer.py:765] (0/8) Epoch 16, batch 1200, train_loss[loss=3.016, ArTop10Accuracy=0.7036, over 12054.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7408, over 11926.29 frames. ], batch size: 98, lr: 7.43e-03 +2024-08-06 06:01:13,238 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:01:13,241 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-16.pt +2024-08-06 06:02:27,260 INFO [trainer.py:765] (0/8) Epoch 17, batch 100, train_loss[loss=2.87, ArTop10Accuracy=0.7406, over 14582.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7477, over 4775.99 frames. ], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,850 INFO [trainer.py:765] (0/8) Epoch 17, batch 200, train_loss[loss=2.891, ArTop10Accuracy=0.7364, over 13562.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7477, over 7777.46 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (0/8) Epoch 17, batch 300, train_loss[loss=2.914, ArTop10Accuracy=0.7359, over 14401.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7486, over 9401.38 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,838 INFO [trainer.py:765] (0/8) Epoch 17, batch 400, train_loss[loss=2.705, ArTop10Accuracy=0.7711, over 10744.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7486, over 10325.29 frames. ], batch size: 15, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (0/8) Epoch 17, batch 500, train_loss[loss=2.898, ArTop10Accuracy=0.7378, over 12485.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7484, over 10884.29 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:45,317 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-21000.pt +2024-08-06 06:05:49,550 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (0/8) Epoch 17, batch 600, train_loss[loss=2.821, ArTop10Accuracy=0.7406, over 11623.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7471, over 11411.79 frames. 
], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,694 INFO [trainer.py:765] (0/8) Epoch 17, batch 700, train_loss[loss=2.79, ArTop10Accuracy=0.7534, over 10259.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7458, over 11559.16 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (0/8) Epoch 17, batch 800, train_loss[loss=2.866, ArTop10Accuracy=0.7368, over 9507.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7445, over 11664.29 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 06:08:16,384 INFO [trainer.py:765] (0/8) Epoch 17, batch 900, train_loss[loss=2.853, ArTop10Accuracy=0.7414, over 13042.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7463, over 11738.31 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,994 INFO [trainer.py:765] (0/8) Epoch 17, batch 1000, train_loss[loss=2.792, ArTop10Accuracy=0.7535, over 12989.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7452, over 11937.82 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (0/8) Epoch 17, batch 1100, train_loss[loss=2.841, ArTop10Accuracy=0.7416, over 13771.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7425, over 11988.67 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,444 INFO [trainer.py:765] (0/8) Epoch 17, batch 1200, train_loss[loss=2.999, ArTop10Accuracy=0.7195, over 12179.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7425, over 11931.79 frames. ], batch size: 99, lr: 7.01e-03 +2024-08-06 06:10:14,221 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:10:14,224 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-17.pt +2024-08-06 06:11:23,102 INFO [trainer.py:765] (0/8) Epoch 18, batch 100, train_loss[loss=2.853, ArTop10Accuracy=0.7415, over 14643.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7502, over 4796.36 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (0/8) Epoch 18, batch 200, train_loss[loss=2.882, ArTop10Accuracy=0.7403, over 13651.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7505, over 7788.10 frames. ], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,318 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-22000.pt +2024-08-06 06:12:43,327 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30166MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (0/8) Epoch 18, batch 300, train_loss[loss=2.888, ArTop10Accuracy=0.7327, over 14308.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7513, over 9413.99 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,098 INFO [trainer.py:765] (0/8) Epoch 18, batch 400, train_loss[loss=2.751, ArTop10Accuracy=0.7596, over 10282.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.75, over 10327.09 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (0/8) Epoch 18, batch 500, train_loss[loss=2.848, ArTop10Accuracy=0.744, over 12247.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7498, over 10876.52 frames. 
], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (0/8) Epoch 18, batch 600, train_loss[loss=2.561, ArTop10Accuracy=0.7949, over 11489.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7481, over 11411.90 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,342 INFO [trainer.py:765] (0/8) Epoch 18, batch 700, train_loss[loss=2.7, ArTop10Accuracy=0.7757, over 10169.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7477, over 11561.13 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (0/8) Epoch 18, batch 800, train_loss[loss=2.759, ArTop10Accuracy=0.7525, over 10039.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7461, over 11679.50 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,912 INFO [trainer.py:765] (0/8) Epoch 18, batch 900, train_loss[loss=2.808, ArTop10Accuracy=0.7512, over 12826.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7476, over 11727.26 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,529 INFO [trainer.py:765] (0/8) Epoch 18, batch 1000, train_loss[loss=2.921, ArTop10Accuracy=0.7333, over 12980.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7462, over 11940.48 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,663 INFO [trainer.py:765] (0/8) Epoch 18, batch 1100, train_loss[loss=2.845, ArTop10Accuracy=0.7458, over 13691.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7444, over 11993.81 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (0/8) Epoch 18, batch 1200, train_loss[loss=2.972, ArTop10Accuracy=0.7211, over 12298.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7449, over 11951.69 frames. ], batch size: 97, lr: 6.63e-03 +2024-08-06 06:19:16,340 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-23000.pt +2024-08-06 06:19:19,163 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,631 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:19:23,635 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-18.pt +2024-08-06 06:20:29,728 INFO [trainer.py:765] (0/8) Epoch 19, batch 100, train_loss[loss=2.852, ArTop10Accuracy=0.7432, over 14291.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7494, over 4776.18 frames. ], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,275 INFO [trainer.py:765] (0/8) Epoch 19, batch 200, train_loss[loss=2.814, ArTop10Accuracy=0.7541, over 13811.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7521, over 7782.53 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 06:21:56,079 INFO [trainer.py:765] (0/8) Epoch 19, batch 300, train_loss[loss=2.842, ArTop10Accuracy=0.7431, over 14254.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7518, over 9431.80 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (0/8) Epoch 19, batch 400, train_loss[loss=2.606, ArTop10Accuracy=0.7849, over 11002.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7524, over 10338.69 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 06:23:18,998 INFO [trainer.py:765] (0/8) Epoch 19, batch 500, train_loss[loss=2.852, ArTop10Accuracy=0.7431, over 12222.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.7529, over 10889.96 frames. 
], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (0/8) Epoch 19, batch 600, train_loss[loss=2.907, ArTop10Accuracy=0.73, over 11542.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7514, over 11420.59 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,186 INFO [trainer.py:765] (0/8) Epoch 19, batch 700, train_loss[loss=2.787, ArTop10Accuracy=0.758, over 10189.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7504, over 11562.39 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,355 INFO [trainer.py:765] (0/8) Epoch 19, batch 800, train_loss[loss=2.902, ArTop10Accuracy=0.739, over 9174.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7488, over 11675.84 frames. ], batch size: 11, lr: 6.33e-03 +2024-08-06 06:25:53,625 INFO [trainer.py:765] (0/8) Epoch 19, batch 900, train_loss[loss=2.827, ArTop10Accuracy=0.7457, over 12958.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7489, over 11720.74 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,773 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-24000.pt +2024-08-06 06:26:24,787 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (0/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,765 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 33183MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,030 INFO [trainer.py:765] (0/8) Epoch 19, batch 1000, train_loss[loss=2.748, ArTop10Accuracy=0.7586, over 13149.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7478, over 11942.62 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,189 INFO [trainer.py:765] (0/8) Epoch 19, batch 1100, train_loss[loss=2.835, ArTop10Accuracy=0.7438, over 13647.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7467, over 12013.00 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,453 INFO [trainer.py:765] (0/8) Epoch 19, batch 1200, train_loss[loss=3.08, ArTop10Accuracy=0.7029, over 11853.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7469, over 11931.73 frames. ], batch size: 99, lr: 6.28e-03 +2024-08-06 06:28:00,683 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:28:00,687 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-19.pt +2024-08-06 06:29:08,985 INFO [trainer.py:765] (0/8) Epoch 20, batch 100, train_loss[loss=2.85, ArTop10Accuracy=0.7455, over 14495.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7549, over 4779.37 frames. ], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (0/8) Epoch 20, batch 200, train_loss[loss=2.777, ArTop10Accuracy=0.762, over 13551.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7547, over 7790.64 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,106 INFO [trainer.py:765] (0/8) Epoch 20, batch 300, train_loss[loss=2.88, ArTop10Accuracy=0.7345, over 14548.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.755, over 9412.14 frames. ], batch size: 45, lr: 6.08e-03 +2024-08-06 06:31:16,353 INFO [trainer.py:765] (0/8) Epoch 20, batch 400, train_loss[loss=2.728, ArTop10Accuracy=0.7665, over 10879.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7552, over 10334.05 frames. 
], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (0/8) Epoch 20, batch 500, train_loss[loss=2.734, ArTop10Accuracy=0.7637, over 12180.00 frames. ], tot_loss[loss=2.784, ArTop10Accuracy=0.755, over 10892.48 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,357 INFO [trainer.py:765] (0/8) Epoch 20, batch 600, train_loss[loss=2.778, ArTop10Accuracy=0.7586, over 11337.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.754, over 11419.54 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,751 INFO [trainer.py:765] (0/8) Epoch 20, batch 700, train_loss[loss=2.755, ArTop10Accuracy=0.7634, over 10040.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7522, over 11566.56 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:40,691 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-25000.pt +2024-08-06 06:33:43,829 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (0/8) Epoch 20, batch 800, train_loss[loss=2.725, ArTop10Accuracy=0.7659, over 10180.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7513, over 11700.04 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (0/8) Epoch 20, batch 900, train_loss[loss=2.898, ArTop10Accuracy=0.739, over 13040.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.7525, over 11747.01 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,138 INFO [trainer.py:765] (0/8) Epoch 20, batch 1000, train_loss[loss=2.837, ArTop10Accuracy=0.742, over 13147.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7517, over 11942.69 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (0/8) Epoch 20, batch 1100, train_loss[loss=2.8, ArTop10Accuracy=0.7521, over 13935.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7498, over 12009.72 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,438 INFO [trainer.py:765] (0/8) Epoch 20, batch 1200, train_loss[loss=2.939, ArTop10Accuracy=0.7217, over 12322.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.749, over 11955.80 frames. ], batch size: 97, lr: 5.97e-03 +2024-08-06 06:36:42,678 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:36:42,681 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-20.pt +2024-08-06 06:36:48,334 INFO [trainer.py:1069] (0/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-1 b/libritts/log/log-train-2024-08-06-03-39-40-1 new file mode 100644 index 0000000000000000000000000000000000000000..da98d00237bedcea4a5a6670130c1502670a8b0a --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-1 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,361 INFO [trainer.py:870] (1/8) Training started +2024-08-06 03:39:40,362 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 03:39:40,362 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,362 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 03:39:41,123 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,939 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 03:39:44,000 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 03:39:44,002 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 03:39:44,003 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 03:39:44,003 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 03:39:44,003 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,618 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 03:39:44,618 INFO [datamodule.py:367] (1/8) About to create dev dataset +2024-08-06 03:39:44,948 INFO [datamodule.py:388] (1/8) About to create dev 
dataloader +2024-08-06 03:40:39,569 INFO [trainer.py:765] (1/8) Epoch 1, batch 100, train_loss[loss=4.211, ArTop10Accuracy=0.4941, over 14685.00 frames. ], tot_loss[loss=4.797, ArTop10Accuracy=0.3935, over 4777.19 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,921 INFO [trainer.py:765] (1/8) Epoch 1, batch 200, train_loss[loss=3.868, ArTop10Accuracy=0.5479, over 13852.00 frames. ], tot_loss[loss=4.306, ArTop10Accuracy=0.4751, over 7786.72 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,950 INFO [trainer.py:765] (1/8) Epoch 1, batch 300, train_loss[loss=3.971, ArTop10Accuracy=0.5225, over 14357.00 frames. ], tot_loss[loss=4.088, ArTop10Accuracy=0.5102, over 9419.77 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,079 INFO [trainer.py:765] (1/8) Epoch 1, batch 400, train_loss[loss=3.741, ArTop10Accuracy=0.568, over 10105.00 frames. ], tot_loss[loss=3.933, ArTop10Accuracy=0.5359, over 10340.30 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (1/8) Epoch 1, batch 500, train_loss[loss=3.502, ArTop10Accuracy=0.5975, over 12171.00 frames. ], tot_loss[loss=3.821, ArTop10Accuracy=0.5547, over 10900.33 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,592 INFO [trainer.py:765] (1/8) Epoch 1, batch 600, train_loss[loss=3.568, ArTop10Accuracy=0.5968, over 11385.00 frames. ], tot_loss[loss=3.742, ArTop10Accuracy=0.5685, over 11432.22 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,898 INFO [trainer.py:765] (1/8) Epoch 1, batch 700, train_loss[loss=3.5, ArTop10Accuracy=0.6223, over 10264.00 frames. ], tot_loss[loss=3.682, ArTop10Accuracy=0.5792, over 11565.69 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (1/8) Epoch 1, batch 800, train_loss[loss=3.455, ArTop10Accuracy=0.6257, over 10082.00 frames. ], tot_loss[loss=3.635, ArTop10Accuracy=0.5877, over 11680.27 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,557 INFO [trainer.py:765] (1/8) Epoch 1, batch 900, train_loss[loss=3.517, ArTop10Accuracy=0.6102, over 12948.00 frames. ], tot_loss[loss=3.581, ArTop10Accuracy=0.5978, over 11721.27 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,649 INFO [trainer.py:765] (1/8) Epoch 1, batch 1000, train_loss[loss=3.477, ArTop10Accuracy=0.619, over 12944.00 frames. ], tot_loss[loss=3.553, ArTop10Accuracy=0.603, over 11943.35 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,989 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,611 INFO [trainer.py:765] (1/8) Epoch 1, batch 1100, train_loss[loss=3.417, ArTop10Accuracy=0.6279, over 13577.00 frames. ], tot_loss[loss=3.53, ArTop10Accuracy=0.607, over 11979.20 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,745 INFO [trainer.py:765] (1/8) Epoch 1, batch 1200, train_loss[loss=3.569, ArTop10Accuracy=0.6013, over 11880.00 frames. ], tot_loss[loss=3.502, ArTop10Accuracy=0.6125, over 11919.95 frames. ], batch size: 97, lr: 2.96e-02 +2024-08-06 03:47:33,462 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 03:48:38,676 INFO [trainer.py:765] (1/8) Epoch 2, batch 100, train_loss[loss=3.514, ArTop10Accuracy=0.6095, over 14636.00 frames. ], tot_loss[loss=3.453, ArTop10Accuracy=0.6225, over 4803.63 frames. 
], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,596 INFO [trainer.py:765] (1/8) Epoch 2, batch 200, train_loss[loss=3.284, ArTop10Accuracy=0.6556, over 13788.00 frames. ], tot_loss[loss=3.433, ArTop10Accuracy=0.6259, over 7786.74 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,519 INFO [trainer.py:765] (1/8) Epoch 2, batch 300, train_loss[loss=3.493, ArTop10Accuracy=0.6161, over 14222.00 frames. ], tot_loss[loss=3.42, ArTop10Accuracy=0.6286, over 9427.07 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:31,999 INFO [trainer.py:765] (1/8) Epoch 2, batch 400, train_loss[loss=3.347, ArTop10Accuracy=0.6448, over 10418.00 frames. ], tot_loss[loss=3.409, ArTop10Accuracy=0.631, over 10350.14 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,109 INFO [trainer.py:765] (1/8) Epoch 2, batch 500, train_loss[loss=3.394, ArTop10Accuracy=0.6314, over 12365.00 frames. ], tot_loss[loss=3.396, ArTop10Accuracy=0.6334, over 10903.72 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,203 INFO [trainer.py:765] (1/8) Epoch 2, batch 600, train_loss[loss=3.394, ArTop10Accuracy=0.6319, over 11559.00 frames. ], tot_loss[loss=3.387, ArTop10Accuracy=0.6348, over 11422.82 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,993 INFO [trainer.py:765] (1/8) Epoch 2, batch 700, train_loss[loss=3.332, ArTop10Accuracy=0.6517, over 10076.00 frames. ], tot_loss[loss=3.388, ArTop10Accuracy=0.6347, over 11562.03 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,090 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (1/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 28727MB +2024-08-06 03:52:56,541 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,881 INFO [trainer.py:765] (1/8) Epoch 2, batch 800, train_loss[loss=3.398, ArTop10Accuracy=0.6388, over 9367.00 frames. ], tot_loss[loss=3.383, ArTop10Accuracy=0.6358, over 11655.20 frames. ], batch size: 11, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (1/8) Epoch 2, batch 900, train_loss[loss=3.391, ArTop10Accuracy=0.6392, over 12796.00 frames. ], tot_loss[loss=3.366, ArTop10Accuracy=0.639, over 11709.70 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,808 INFO [trainer.py:765] (1/8) Epoch 2, batch 1000, train_loss[loss=3.409, ArTop10Accuracy=0.6317, over 13004.00 frames. ], tot_loss[loss=3.362, ArTop10Accuracy=0.64, over 11923.32 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,006 INFO [trainer.py:765] (1/8) Epoch 2, batch 1100, train_loss[loss=3.398, ArTop10Accuracy=0.6314, over 13842.00 frames. ], tot_loss[loss=3.36, ArTop10Accuracy=0.6402, over 11993.47 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,228 INFO [trainer.py:765] (1/8) Epoch 2, batch 1200, train_loss[loss=3.483, ArTop10Accuracy=0.6179, over 12353.00 frames. ], tot_loss[loss=3.348, ArTop10Accuracy=0.6423, over 11942.25 frames. ], batch size: 97, lr: 2.80e-02 +2024-08-06 03:55:51,272 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 03:57:04,101 INFO [trainer.py:765] (1/8) Epoch 3, batch 100, train_loss[loss=3.333, ArTop10Accuracy=0.6494, over 14623.00 frames. ], tot_loss[loss=3.311, ArTop10Accuracy=0.651, over 4773.76 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,979 INFO [trainer.py:765] (1/8) Epoch 3, batch 200, train_loss[loss=3.328, ArTop10Accuracy=0.65, over 13756.00 frames. ], tot_loss[loss=3.288, ArTop10Accuracy=0.6547, over 7779.65 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,073 INFO [trainer.py:765] (1/8) Epoch 3, batch 300, train_loss[loss=3.245, ArTop10Accuracy=0.6634, over 14083.00 frames. ], tot_loss[loss=3.274, ArTop10Accuracy=0.6572, over 9407.73 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,252 INFO [trainer.py:765] (1/8) Epoch 3, batch 400, train_loss[loss=3.083, ArTop10Accuracy=0.6934, over 10398.00 frames. ], tot_loss[loss=3.265, ArTop10Accuracy=0.6589, over 10326.49 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,302 INFO [trainer.py:765] (1/8) Epoch 3, batch 500, train_loss[loss=3.104, ArTop10Accuracy=0.6816, over 12161.00 frames. ], tot_loss[loss=3.253, ArTop10Accuracy=0.6614, over 10898.01 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,094 INFO [trainer.py:765] (1/8) Epoch 3, batch 600, train_loss[loss=3.198, ArTop10Accuracy=0.6762, over 11433.00 frames. ], tot_loss[loss=3.237, ArTop10Accuracy=0.6643, over 11425.47 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,057 INFO [trainer.py:765] (1/8) Epoch 3, batch 700, train_loss[loss=3.226, ArTop10Accuracy=0.6768, over 10103.00 frames. ], tot_loss[loss=3.232, ArTop10Accuracy=0.6655, over 11560.08 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,268 INFO [trainer.py:765] (1/8) Epoch 3, batch 800, train_loss[loss=2.973, ArTop10Accuracy=0.7149, over 10194.00 frames. ], tot_loss[loss=3.225, ArTop10Accuracy=0.6668, over 11687.74 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,739 INFO [trainer.py:765] (1/8) Epoch 3, batch 900, train_loss[loss=3.063, ArTop10Accuracy=0.6936, over 12768.00 frames. ], tot_loss[loss=3.206, ArTop10Accuracy=0.6706, over 11752.15 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,282 INFO [trainer.py:765] (1/8) Epoch 3, batch 1000, train_loss[loss=3.204, ArTop10Accuracy=0.6693, over 12887.00 frames. ], tot_loss[loss=3.196, ArTop10Accuracy=0.6724, over 11927.65 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,941 INFO [trainer.py:765] (1/8) Epoch 3, batch 1100, train_loss[loss=3.202, ArTop10Accuracy=0.6681, over 13940.00 frames. ], tot_loss[loss=3.191, ArTop10Accuracy=0.6736, over 12000.62 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,311 INFO [trainer.py:765] (1/8) Epoch 3, batch 1200, train_loss[loss=3.286, ArTop10Accuracy=0.6576, over 12261.00 frames. ], tot_loss[loss=3.183, ArTop10Accuracy=0.6754, over 11938.78 frames. ], batch size: 98, lr: 2.54e-02 +2024-08-06 04:04:26,857 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:05:43,369 INFO [trainer.py:765] (1/8) Epoch 4, batch 100, train_loss[loss=3.22, ArTop10Accuracy=0.6675, over 14405.00 frames. ], tot_loss[loss=3.141, ArTop10Accuracy=0.6839, over 4771.98 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,077 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,404 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29537MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,825 INFO [trainer.py:765] (1/8) Epoch 4, batch 200, train_loss[loss=3.197, ArTop10Accuracy=0.682, over 13803.00 frames. ], tot_loss[loss=3.124, ArTop10Accuracy=0.6873, over 7792.86 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,544 INFO [trainer.py:765] (1/8) Epoch 4, batch 300, train_loss[loss=3.211, ArTop10Accuracy=0.6781, over 14502.00 frames. ], tot_loss[loss=3.117, ArTop10Accuracy=0.6887, over 9418.04 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (1/8) Epoch 4, batch 400, train_loss[loss=3.034, ArTop10Accuracy=0.7056, over 11117.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6889, over 10328.79 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,344 INFO [trainer.py:765] (1/8) Epoch 4, batch 500, train_loss[loss=3.084, ArTop10Accuracy=0.6913, over 12164.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.6898, over 10898.32 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (1/8) Epoch 4, batch 600, train_loss[loss=3.048, ArTop10Accuracy=0.6966, over 11971.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.6894, over 11420.59 frames. ], batch size: 19, lr: 2.32e-02 +2024-08-06 04:10:13,501 INFO [trainer.py:765] (1/8) Epoch 4, batch 700, train_loss[loss=3.067, ArTop10Accuracy=0.7017, over 10131.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6892, over 11573.58 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,959 INFO [trainer.py:765] (1/8) Epoch 4, batch 800, train_loss[loss=2.975, ArTop10Accuracy=0.7175, over 10435.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.6893, over 11665.25 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,330 INFO [trainer.py:765] (1/8) Epoch 4, batch 900, train_loss[loss=3.09, ArTop10Accuracy=0.6959, over 12982.00 frames. ], tot_loss[loss=3.101, ArTop10Accuracy=0.6913, over 11731.81 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (1/8) Epoch 4, batch 1000, train_loss[loss=3.044, ArTop10Accuracy=0.7061, over 12973.00 frames. ], tot_loss[loss=3.099, ArTop10Accuracy=0.692, over 11933.40 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (1/8) Epoch 4, batch 1100, train_loss[loss=3.108, ArTop10Accuracy=0.6931, over 13630.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6905, over 12001.05 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,545 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,827 INFO [trainer.py:765] (1/8) Epoch 4, batch 1200, train_loss[loss=3.158, ArTop10Accuracy=0.678, over 12305.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6903, over 11931.65 frames. ], batch size: 98, lr: 2.25e-02 +2024-08-06 04:13:24,356 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (1/8) Epoch 5, batch 100, train_loss[loss=3.184, ArTop10Accuracy=0.6818, over 14605.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7008, over 4778.12 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (1/8) Epoch 5, batch 200, train_loss[loss=3.144, ArTop10Accuracy=0.6805, over 13768.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7013, over 7779.65 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (1/8) Epoch 5, batch 300, train_loss[loss=2.999, ArTop10Accuracy=0.7113, over 14653.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7016, over 9413.83 frames. ], batch size: 45, lr: 2.08e-02 +2024-08-06 04:16:53,134 INFO [trainer.py:765] (1/8) Epoch 5, batch 400, train_loss[loss=3.053, ArTop10Accuracy=0.7039, over 10298.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7016, over 10330.35 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (1/8) Epoch 5, batch 500, train_loss[loss=3.051, ArTop10Accuracy=0.6982, over 12439.00 frames. ], tot_loss[loss=3.049, ArTop10Accuracy=0.702, over 10905.78 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (1/8) Epoch 5, batch 600, train_loss[loss=3.094, ArTop10Accuracy=0.6999, over 11559.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.7011, over 11426.15 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,033 INFO [trainer.py:765] (1/8) Epoch 5, batch 700, train_loss[loss=2.871, ArTop10Accuracy=0.7291, over 10005.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7004, over 11568.53 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (1/8) Epoch 5, batch 800, train_loss[loss=3, ArTop10Accuracy=0.7126, over 10694.00 frames. ], tot_loss[loss=3.062, ArTop10Accuracy=0.6992, over 11693.41 frames. ], batch size: 13, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 04:20:27,476 INFO [trainer.py:811] (1/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29914MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,766 INFO [trainer.py:765] (1/8) Epoch 5, batch 900, train_loss[loss=3.024, ArTop10Accuracy=0.7109, over 13120.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7012, over 11731.61 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (1/8) Epoch 5, batch 1000, train_loss[loss=3.029, ArTop10Accuracy=0.7119, over 12783.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.7009, over 11936.20 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,451 INFO [trainer.py:765] (1/8) Epoch 5, batch 1100, train_loss[loss=2.999, ArTop10Accuracy=0.7113, over 13838.00 frames. ], tot_loss[loss=3.062, ArTop10Accuracy=0.6995, over 11994.41 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (1/8) Epoch 5, batch 1200, train_loss[loss=3.164, ArTop10Accuracy=0.6829, over 11790.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.7009, over 11929.62 frames. ], batch size: 98, lr: 1.99e-02 +2024-08-06 04:22:30,194 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (1/8) Epoch 6, batch 100, train_loss[loss=3.049, ArTop10Accuracy=0.7005, over 14328.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7089, over 4777.09 frames. 
], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,255 INFO [trainer.py:765] (1/8) Epoch 6, batch 200, train_loss[loss=2.914, ArTop10Accuracy=0.7266, over 13723.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7104, over 7776.05 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,677 INFO [trainer.py:765] (1/8) Epoch 6, batch 300, train_loss[loss=3.049, ArTop10Accuracy=0.7045, over 14424.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.711, over 9393.06 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (1/8) Epoch 6, batch 400, train_loss[loss=2.863, ArTop10Accuracy=0.7268, over 10029.00 frames. ], tot_loss[loss=3.005, ArTop10Accuracy=0.711, over 10322.51 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 04:26:51,486 INFO [trainer.py:765] (1/8) Epoch 6, batch 500, train_loss[loss=2.995, ArTop10Accuracy=0.7079, over 12478.00 frames. ], tot_loss[loss=3.002, ArTop10Accuracy=0.7113, over 10891.41 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (1/8) Epoch 6, batch 600, train_loss[loss=2.917, ArTop10Accuracy=0.7149, over 11750.00 frames. ], tot_loss[loss=3.007, ArTop10Accuracy=0.7099, over 11447.28 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,369 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,240 INFO [trainer.py:765] (1/8) Epoch 6, batch 700, train_loss[loss=2.854, ArTop10Accuracy=0.7402, over 10171.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7086, over 11581.22 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (1/8) Epoch 6, batch 800, train_loss[loss=3.12, ArTop10Accuracy=0.6847, over 10061.00 frames. ], tot_loss[loss=3.018, ArTop10Accuracy=0.7082, over 11684.35 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,751 INFO [trainer.py:765] (1/8) Epoch 6, batch 900, train_loss[loss=3.003, ArTop10Accuracy=0.7076, over 13161.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7085, over 11729.41 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (1/8) Epoch 6, batch 1000, train_loss[loss=3.037, ArTop10Accuracy=0.6993, over 12855.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7076, over 11934.75 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,383 INFO [trainer.py:765] (1/8) Epoch 6, batch 1100, train_loss[loss=2.996, ArTop10Accuracy=0.7164, over 14108.00 frames. ], tot_loss[loss=3.032, ArTop10Accuracy=0.7057, over 12000.46 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (1/8) Epoch 6, batch 1200, train_loss[loss=3.169, ArTop10Accuracy=0.6784, over 11880.00 frames. ], tot_loss[loss=3.029, ArTop10Accuracy=0.706, over 11943.58 frames. ], batch size: 99, lr: 1.76e-02 +2024-08-06 04:31:40,439 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:32:52,405 INFO [trainer.py:765] (1/8) Epoch 7, batch 100, train_loss[loss=3.047, ArTop10Accuracy=0.702, over 14422.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7148, over 4779.19 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,223 INFO [trainer.py:765] (1/8) Epoch 7, batch 200, train_loss[loss=2.943, ArTop10Accuracy=0.7228, over 13888.00 frames. ], tot_loss[loss=2.985, ArTop10Accuracy=0.7155, over 7795.94 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (1/8) Epoch 7, batch 300, train_loss[loss=2.972, ArTop10Accuracy=0.7223, over 14520.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.716, over 9435.04 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,847 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 04:34:45,809 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,809 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29914MB +2024-08-06 04:34:46,124 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,146 INFO [trainer.py:765] (1/8) Epoch 7, batch 400, train_loss[loss=2.893, ArTop10Accuracy=0.739, over 10205.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7148, over 10348.81 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 04:36:01,710 INFO [trainer.py:765] (1/8) Epoch 7, batch 500, train_loss[loss=2.904, ArTop10Accuracy=0.7278, over 12303.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.7158, over 10908.06 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (1/8) Epoch 7, batch 600, train_loss[loss=2.902, ArTop10Accuracy=0.7295, over 11582.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7153, over 11445.31 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (1/8) Epoch 7, batch 700, train_loss[loss=2.89, ArTop10Accuracy=0.7312, over 9354.00 frames. ], tot_loss[loss=2.993, ArTop10Accuracy=0.7132, over 11556.36 frames. ], batch size: 11, lr: 1.60e-02 +2024-08-06 04:38:13,613 INFO [trainer.py:765] (1/8) Epoch 7, batch 800, train_loss[loss=2.881, ArTop10Accuracy=0.7282, over 9319.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7125, over 11684.94 frames. ], batch size: 11, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (1/8) Epoch 7, batch 900, train_loss[loss=2.983, ArTop10Accuracy=0.7156, over 13025.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7146, over 11730.77 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,574 INFO [trainer.py:765] (1/8) Epoch 7, batch 1000, train_loss[loss=3.091, ArTop10Accuracy=0.6947, over 12891.00 frames. ], tot_loss[loss=2.993, ArTop10Accuracy=0.7132, over 11931.30 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (1/8) Epoch 7, batch 1100, train_loss[loss=3.078, ArTop10Accuracy=0.6948, over 13585.00 frames. ], tot_loss[loss=2.999, ArTop10Accuracy=0.7124, over 11996.35 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,989 INFO [trainer.py:765] (1/8) Epoch 7, batch 1200, train_loss[loss=3.127, ArTop10Accuracy=0.6918, over 12528.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.7121, over 11948.80 frames. ], batch size: 99, lr: 1.57e-02 +2024-08-06 04:40:43,222 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,371 INFO [trainer.py:765] (1/8) Epoch 8, batch 100, train_loss[loss=3.097, ArTop10Accuracy=0.6954, over 14820.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.719, over 4786.78 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (1/8) Epoch 8, batch 200, train_loss[loss=3.127, ArTop10Accuracy=0.6864, over 13833.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.719, over 7780.75 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (1/8) Epoch 8, batch 300, train_loss[loss=2.93, ArTop10Accuracy=0.7206, over 14569.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7196, over 9393.57 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,462 INFO [trainer.py:765] (1/8) Epoch 8, batch 400, train_loss[loss=2.853, ArTop10Accuracy=0.7256, over 10176.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7198, over 10311.66 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (1/8) Epoch 8, batch 500, train_loss[loss=2.91, ArTop10Accuracy=0.723, over 12279.00 frames. ], tot_loss[loss=2.957, ArTop10Accuracy=0.7206, over 10909.44 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,394 INFO [trainer.py:765] (1/8) Epoch 8, batch 600, train_loss[loss=2.965, ArTop10Accuracy=0.7214, over 11599.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7196, over 11431.46 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,038 INFO [trainer.py:765] (1/8) Epoch 8, batch 700, train_loss[loss=2.924, ArTop10Accuracy=0.7204, over 9422.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7186, over 11568.95 frames. ], batch size: 11, lr: 1.43e-02 +2024-08-06 04:47:10,208 INFO [trainer.py:765] (1/8) Epoch 8, batch 800, train_loss[loss=2.762, ArTop10Accuracy=0.7587, over 9953.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7178, over 11703.51 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,606 INFO [trainer.py:765] (1/8) Epoch 8, batch 900, train_loss[loss=2.975, ArTop10Accuracy=0.7147, over 12770.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7183, over 11749.70 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,032 INFO [trainer.py:765] (1/8) Epoch 8, batch 1000, train_loss[loss=2.895, ArTop10Accuracy=0.7247, over 13061.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7172, over 11954.97 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,827 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (1/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29914MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,931 INFO [trainer.py:765] (1/8) Epoch 8, batch 1100, train_loss[loss=3.083, ArTop10Accuracy=0.7016, over 13744.00 frames. ], tot_loss[loss=2.977, ArTop10Accuracy=0.7163, over 11994.55 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (1/8) Epoch 8, batch 1200, train_loss[loss=3.142, ArTop10Accuracy=0.6871, over 11796.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.7156, over 11937.07 frames. ], batch size: 98, lr: 1.40e-02 +2024-08-06 04:49:49,198 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:51:01,547 INFO [trainer.py:765] (1/8) Epoch 9, batch 100, train_loss[loss=3.043, ArTop10Accuracy=0.7075, over 14747.00 frames. ], tot_loss[loss=2.952, ArTop10Accuracy=0.7225, over 4779.52 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (1/8) Epoch 9, batch 200, train_loss[loss=2.9, ArTop10Accuracy=0.7339, over 13749.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7247, over 7784.70 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (1/8) Epoch 9, batch 300, train_loss[loss=2.947, ArTop10Accuracy=0.7237, over 14498.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7258, over 9413.47 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (1/8) Epoch 9, batch 400, train_loss[loss=2.898, ArTop10Accuracy=0.7289, over 10396.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7248, over 10343.48 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 04:53:58,144 INFO [trainer.py:765] (1/8) Epoch 9, batch 500, train_loss[loss=3.009, ArTop10Accuracy=0.7046, over 12482.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.7248, over 10908.31 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (1/8) Epoch 9, batch 600, train_loss[loss=2.88, ArTop10Accuracy=0.7351, over 11555.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7239, over 11443.87 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (1/8) Epoch 9, batch 700, train_loss[loss=2.952, ArTop10Accuracy=0.7199, over 9217.00 frames. ], tot_loss[loss=2.948, ArTop10Accuracy=0.7221, over 11577.60 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 04:56:04,575 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,597 INFO [trainer.py:765] (1/8) Epoch 9, batch 800, train_loss[loss=2.97, ArTop10Accuracy=0.7133, over 9305.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7222, over 11673.87 frames. ], batch size: 11, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (1/8) Epoch 9, batch 900, train_loss[loss=2.84, ArTop10Accuracy=0.7408, over 13126.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7233, over 11726.70 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (1/8) Epoch 9, batch 1000, train_loss[loss=2.927, ArTop10Accuracy=0.7269, over 12842.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7215, over 11924.86 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,656 INFO [trainer.py:765] (1/8) Epoch 9, batch 1100, train_loss[loss=2.928, ArTop10Accuracy=0.7262, over 13847.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7201, over 11978.23 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,093 INFO [trainer.py:765] (1/8) Epoch 9, batch 1200, train_loss[loss=3.087, ArTop10Accuracy=0.7016, over 13113.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.7204, over 11958.80 frames. ], batch size: 97, lr: 1.27e-02 +2024-08-06 04:58:43,245 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 04:59:52,748 INFO [trainer.py:765] (1/8) Epoch 10, batch 100, train_loss[loss=3.024, ArTop10Accuracy=0.7094, over 14484.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7288, over 4805.25 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (1/8) Epoch 10, batch 200, train_loss[loss=2.825, ArTop10Accuracy=0.7417, over 13692.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7285, over 7805.84 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (1/8) Epoch 10, batch 300, train_loss[loss=2.978, ArTop10Accuracy=0.7133, over 14424.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7282, over 9437.15 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,047 INFO [trainer.py:765] (1/8) Epoch 10, batch 400, train_loss[loss=2.874, ArTop10Accuracy=0.7338, over 10806.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7281, over 10347.74 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,487 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 05:02:55,378 INFO [trainer.py:811] (1/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,379 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (1/8) Epoch 10, batch 500, train_loss[loss=2.849, ArTop10Accuracy=0.7416, over 12035.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7277, over 10890.25 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (1/8) Epoch 10, batch 600, train_loss[loss=2.751, ArTop10Accuracy=0.7489, over 11510.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7265, over 11425.54 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (1/8) Epoch 10, batch 700, train_loss[loss=2.867, ArTop10Accuracy=0.7277, over 10102.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7254, over 11561.23 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (1/8) Epoch 10, batch 800, train_loss[loss=2.75, ArTop10Accuracy=0.7562, over 10174.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7242, over 11665.97 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (1/8) Epoch 10, batch 900, train_loss[loss=2.795, ArTop10Accuracy=0.7538, over 13100.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7254, over 11717.16 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,844 INFO [trainer.py:765] (1/8) Epoch 10, batch 1000, train_loss[loss=2.856, ArTop10Accuracy=0.7444, over 13035.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7251, over 11937.94 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,056 INFO [trainer.py:765] (1/8) Epoch 10, batch 1100, train_loss[loss=3.035, ArTop10Accuracy=0.6976, over 13713.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.724, over 12000.95 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,484 INFO [trainer.py:765] (1/8) Epoch 10, batch 1200, train_loss[loss=3.091, ArTop10Accuracy=0.688, over 11661.00 frames. ], tot_loss[loss=2.944, ArTop10Accuracy=0.723, over 11942.60 frames. ], batch size: 98, lr: 1.16e-02 +2024-08-06 05:07:40,804 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:08:52,966 INFO [trainer.py:765] (1/8) Epoch 11, batch 100, train_loss[loss=2.952, ArTop10Accuracy=0.7234, over 14658.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7313, over 4795.02 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,277 INFO [trainer.py:765] (1/8) Epoch 11, batch 200, train_loss[loss=2.837, ArTop10Accuracy=0.7443, over 13437.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7308, over 7799.60 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,720 INFO [trainer.py:765] (1/8) Epoch 11, batch 300, train_loss[loss=2.882, ArTop10Accuracy=0.7337, over 14214.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7312, over 9428.44 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (1/8) Epoch 11, batch 400, train_loss[loss=2.776, ArTop10Accuracy=0.7525, over 10812.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7311, over 10349.47 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,691 INFO [trainer.py:765] (1/8) Epoch 11, batch 500, train_loss[loss=2.832, ArTop10Accuracy=0.7447, over 12256.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7312, over 10905.08 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,287 INFO [trainer.py:765] (1/8) Epoch 11, batch 600, train_loss[loss=2.758, ArTop10Accuracy=0.7534, over 11557.00 frames. ], tot_loss[loss=2.91, ArTop10Accuracy=0.7301, over 11431.45 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,708 INFO [trainer.py:765] (1/8) Epoch 11, batch 700, train_loss[loss=2.842, ArTop10Accuracy=0.7449, over 10276.00 frames. ], tot_loss[loss=2.917, ArTop10Accuracy=0.7284, over 11603.02 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,205 INFO [trainer.py:765] (1/8) Epoch 11, batch 800, train_loss[loss=2.886, ArTop10Accuracy=0.7415, over 10103.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7281, over 11708.31 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,667 INFO [trainer.py:765] (1/8) Epoch 11, batch 900, train_loss[loss=2.983, ArTop10Accuracy=0.7167, over 12986.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7294, over 11763.77 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,263 INFO [trainer.py:765] (1/8) Epoch 11, batch 1000, train_loss[loss=2.839, ArTop10Accuracy=0.7413, over 12999.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7289, over 11951.15 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,260 INFO [trainer.py:765] (1/8) Epoch 11, batch 1100, train_loss[loss=3, ArTop10Accuracy=0.7166, over 13743.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7277, over 11998.49 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,498 INFO [trainer.py:765] (1/8) Epoch 11, batch 1200, train_loss[loss=3.05, ArTop10Accuracy=0.7029, over 11881.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7275, over 11939.46 frames. ], batch size: 99, lr: 1.06e-02 +2024-08-06 05:16:12,697 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 05:16:21,623 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,524 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:18:03,004 INFO [trainer.py:765] (1/8) Epoch 12, batch 100, train_loss[loss=2.948, ArTop10Accuracy=0.7227, over 14766.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7349, over 4803.10 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,003 INFO [trainer.py:765] (1/8) Epoch 12, batch 200, train_loss[loss=2.996, ArTop10Accuracy=0.7162, over 13847.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7347, over 7795.71 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (1/8) Epoch 12, batch 300, train_loss[loss=2.957, ArTop10Accuracy=0.7282, over 14435.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7351, over 9411.90 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,430 INFO [trainer.py:765] (1/8) Epoch 12, batch 400, train_loss[loss=2.886, ArTop10Accuracy=0.7416, over 10391.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7348, over 10326.32 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,639 INFO [trainer.py:765] (1/8) Epoch 12, batch 500, train_loss[loss=2.952, ArTop10Accuracy=0.7296, over 12311.00 frames. ], tot_loss[loss=2.886, ArTop10Accuracy=0.7353, over 10893.08 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,915 INFO [trainer.py:765] (1/8) Epoch 12, batch 600, train_loss[loss=2.877, ArTop10Accuracy=0.7314, over 11609.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.734, over 11424.95 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,205 INFO [trainer.py:765] (1/8) Epoch 12, batch 700, train_loss[loss=2.803, ArTop10Accuracy=0.7459, over 9975.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7333, over 11573.77 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,911 INFO [trainer.py:765] (1/8) Epoch 12, batch 800, train_loss[loss=2.761, ArTop10Accuracy=0.7474, over 10005.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.732, over 11684.31 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,459 INFO [trainer.py:765] (1/8) Epoch 12, batch 900, train_loss[loss=2.892, ArTop10Accuracy=0.7327, over 13289.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7331, over 11737.04 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 05:23:54,575 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,344 INFO [trainer.py:765] (1/8) Epoch 12, batch 1000, train_loss[loss=2.82, ArTop10Accuracy=0.7526, over 12919.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7313, over 11937.90 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,501 INFO [trainer.py:765] (1/8) Epoch 12, batch 1100, train_loss[loss=2.954, ArTop10Accuracy=0.7211, over 13661.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7302, over 11999.56 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,881 INFO [trainer.py:765] (1/8) Epoch 12, batch 1200, train_loss[loss=3.106, ArTop10Accuracy=0.6906, over 11901.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7293, over 11945.42 frames. ], batch size: 97, lr: 9.78e-03 +2024-08-06 05:25:41,043 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:26:46,787 INFO [trainer.py:765] (1/8) Epoch 13, batch 100, train_loss[loss=2.906, ArTop10Accuracy=0.731, over 14602.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7385, over 4787.21 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,553 INFO [trainer.py:765] (1/8) Epoch 13, batch 200, train_loss[loss=2.885, ArTop10Accuracy=0.7358, over 13786.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7384, over 7785.28 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (1/8) Epoch 13, batch 300, train_loss[loss=2.867, ArTop10Accuracy=0.7418, over 14393.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7391, over 9414.48 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (1/8) Epoch 13, batch 400, train_loss[loss=2.824, ArTop10Accuracy=0.7474, over 10475.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7393, over 10313.15 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (1/8) Epoch 13, batch 500, train_loss[loss=2.729, ArTop10Accuracy=0.7527, over 12343.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7396, over 10873.10 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,248 INFO [trainer.py:765] (1/8) Epoch 13, batch 600, train_loss[loss=2.75, ArTop10Accuracy=0.7631, over 11456.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7379, over 11414.87 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (1/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,042 INFO [trainer.py:765] (1/8) Epoch 13, batch 700, train_loss[loss=2.841, ArTop10Accuracy=0.7457, over 10367.00 frames. ], tot_loss[loss=2.882, ArTop10Accuracy=0.7353, over 11552.22 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,146 INFO [trainer.py:765] (1/8) Epoch 13, batch 800, train_loss[loss=2.727, ArTop10Accuracy=0.7702, over 10091.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.7351, over 11685.22 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,520 INFO [trainer.py:765] (1/8) Epoch 13, batch 900, train_loss[loss=2.896, ArTop10Accuracy=0.7348, over 12939.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7363, over 11743.46 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,042 INFO [trainer.py:765] (1/8) Epoch 13, batch 1000, train_loss[loss=2.882, ArTop10Accuracy=0.7395, over 13052.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7343, over 11933.66 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,231 INFO [trainer.py:765] (1/8) Epoch 13, batch 1100, train_loss[loss=2.868, ArTop10Accuracy=0.7404, over 13751.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7331, over 12013.64 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,518 INFO [trainer.py:765] (1/8) Epoch 13, batch 1200, train_loss[loss=3.042, ArTop10Accuracy=0.7104, over 12299.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7327, over 11945.18 frames. ], batch size: 98, lr: 9.07e-03 +2024-08-06 05:34:30,235 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:35:39,198 INFO [trainer.py:765] (1/8) Epoch 14, batch 100, train_loss[loss=2.937, ArTop10Accuracy=0.7268, over 14650.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7388, over 4773.88 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,063 INFO [trainer.py:765] (1/8) Epoch 14, batch 200, train_loss[loss=2.889, ArTop10Accuracy=0.7419, over 13718.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7399, over 7789.92 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (1/8) Epoch 14, batch 300, train_loss[loss=2.92, ArTop10Accuracy=0.7316, over 14309.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.741, over 9421.38 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,029 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,139 INFO [trainer.py:765] (1/8) Epoch 14, batch 400, train_loss[loss=2.894, ArTop10Accuracy=0.7352, over 10361.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7409, over 10335.77 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (1/8) Epoch 14, batch 500, train_loss[loss=2.838, ArTop10Accuracy=0.7473, over 12248.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7416, over 10916.19 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,375 INFO [trainer.py:765] (1/8) Epoch 14, batch 600, train_loss[loss=2.725, ArTop10Accuracy=0.7621, over 11564.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7405, over 11425.60 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (1/8) Epoch 14, batch 700, train_loss[loss=2.855, ArTop10Accuracy=0.7373, over 10051.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7376, over 11567.50 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,136 INFO [trainer.py:765] (1/8) Epoch 14, batch 800, train_loss[loss=2.86, ArTop10Accuracy=0.7426, over 10108.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7369, over 11685.36 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,467 INFO [trainer.py:765] (1/8) Epoch 14, batch 900, train_loss[loss=2.889, ArTop10Accuracy=0.7261, over 13003.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7382, over 11732.11 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,996 INFO [trainer.py:765] (1/8) Epoch 14, batch 1000, train_loss[loss=2.853, ArTop10Accuracy=0.739, over 12932.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7365, over 11926.03 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,220 INFO [trainer.py:765] (1/8) Epoch 14, batch 1100, train_loss[loss=2.838, ArTop10Accuracy=0.7475, over 13897.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7366, over 11991.45 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,549 INFO [trainer.py:765] (1/8) Epoch 14, batch 1200, train_loss[loss=3.052, ArTop10Accuracy=0.7097, over 12008.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7363, over 11941.69 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:18,869 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:44:28,571 INFO [trainer.py:765] (1/8) Epoch 15, batch 100, train_loss[loss=2.988, ArTop10Accuracy=0.7149, over 14408.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7418, over 4787.97 frames. ], batch size: 61, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 05:44:38,024 INFO [trainer.py:811] (1/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,185 INFO [trainer.py:765] (1/8) Epoch 15, batch 200, train_loss[loss=2.832, ArTop10Accuracy=0.7372, over 13619.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7438, over 7792.95 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (1/8) Epoch 15, batch 300, train_loss[loss=2.883, ArTop10Accuracy=0.7322, over 14337.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7434, over 9432.06 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (1/8) Epoch 15, batch 400, train_loss[loss=2.75, ArTop10Accuracy=0.7746, over 10929.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.743, over 10339.39 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (1/8) Epoch 15, batch 500, train_loss[loss=2.897, ArTop10Accuracy=0.7295, over 12244.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7439, over 10898.15 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (1/8) Epoch 15, batch 600, train_loss[loss=2.708, ArTop10Accuracy=0.7705, over 11604.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7425, over 11441.84 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,855 INFO [trainer.py:765] (1/8) Epoch 15, batch 700, train_loss[loss=2.931, ArTop10Accuracy=0.7207, over 9923.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7409, over 11584.21 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,778 INFO [trainer.py:765] (1/8) Epoch 15, batch 800, train_loss[loss=2.799, ArTop10Accuracy=0.751, over 9435.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7392, over 11680.50 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (1/8) Epoch 15, batch 900, train_loss[loss=2.885, ArTop10Accuracy=0.7425, over 13101.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7409, over 11741.27 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (1/8) Epoch 15, batch 1000, train_loss[loss=2.879, ArTop10Accuracy=0.7285, over 12940.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7401, over 11939.29 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (1/8) Epoch 15, batch 1100, train_loss[loss=2.92, ArTop10Accuracy=0.7312, over 13428.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7378, over 12005.19 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (1/8) Epoch 15, batch 1200, train_loss[loss=2.979, ArTop10Accuracy=0.7201, over 12276.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7373, over 11940.75 frames. ], batch size: 97, lr: 7.91e-03 +2024-08-06 05:52:18,078 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 05:53:29,262 INFO [trainer.py:765] (1/8) Epoch 16, batch 100, train_loss[loss=2.95, ArTop10Accuracy=0.7261, over 14794.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7458, over 4788.63 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,876 INFO [trainer.py:765] (1/8) Epoch 16, batch 200, train_loss[loss=2.867, ArTop10Accuracy=0.7372, over 13845.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7468, over 7795.41 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,736 INFO [trainer.py:765] (1/8) Epoch 16, batch 300, train_loss[loss=2.835, ArTop10Accuracy=0.7487, over 14196.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7466, over 9437.07 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,930 INFO [trainer.py:765] (1/8) Epoch 16, batch 400, train_loss[loss=2.825, ArTop10Accuracy=0.7417, over 10353.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7463, over 10343.85 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 05:56:27,679 INFO [trainer.py:765] (1/8) Epoch 16, batch 500, train_loss[loss=2.891, ArTop10Accuracy=0.7377, over 12075.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7454, over 10902.38 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,439 INFO [trainer.py:765] (1/8) Epoch 16, batch 600, train_loss[loss=2.745, ArTop10Accuracy=0.7592, over 11620.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7439, over 11438.59 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,039 INFO [trainer.py:765] (1/8) Epoch 16, batch 700, train_loss[loss=2.872, ArTop10Accuracy=0.7347, over 10090.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7427, over 11567.21 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,023 INFO [trainer.py:765] (1/8) Epoch 16, batch 800, train_loss[loss=2.702, ArTop10Accuracy=0.7621, over 9941.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7416, over 11677.75 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,568 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (1/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,320 INFO [trainer.py:765] (1/8) Epoch 16, batch 900, train_loss[loss=2.841, ArTop10Accuracy=0.7391, over 12885.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7426, over 11730.07 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (1/8) Epoch 16, batch 1000, train_loss[loss=2.772, ArTop10Accuracy=0.7538, over 12877.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7417, over 11925.18 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,091 INFO [trainer.py:765] (1/8) Epoch 16, batch 1100, train_loss[loss=2.963, ArTop10Accuracy=0.7273, over 13734.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7394, over 11985.18 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,464 INFO [trainer.py:765] (1/8) Epoch 16, batch 1200, train_loss[loss=2.979, ArTop10Accuracy=0.7205, over 11777.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7397, over 11930.28 frames. ], batch size: 97, lr: 7.43e-03 +2024-08-06 06:01:12,268 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:02:27,261 INFO [trainer.py:765] (1/8) Epoch 17, batch 100, train_loss[loss=2.891, ArTop10Accuracy=0.7388, over 14342.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7468, over 4773.07 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,850 INFO [trainer.py:765] (1/8) Epoch 17, batch 200, train_loss[loss=2.898, ArTop10Accuracy=0.7334, over 13627.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7479, over 7781.00 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (1/8) Epoch 17, batch 300, train_loss[loss=2.856, ArTop10Accuracy=0.7417, over 14336.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7477, over 9420.34 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,838 INFO [trainer.py:765] (1/8) Epoch 17, batch 400, train_loss[loss=2.741, ArTop10Accuracy=0.7622, over 10507.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7477, over 10322.09 frames. ], batch size: 14, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (1/8) Epoch 17, batch 500, train_loss[loss=2.815, ArTop10Accuracy=0.7536, over 12257.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7488, over 10888.13 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,551 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (1/8) Epoch 17, batch 600, train_loss[loss=2.835, ArTop10Accuracy=0.7372, over 11792.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.747, over 11409.04 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,695 INFO [trainer.py:765] (1/8) Epoch 17, batch 700, train_loss[loss=2.82, ArTop10Accuracy=0.7605, over 10070.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7457, over 11578.57 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (1/8) Epoch 17, batch 800, train_loss[loss=2.747, ArTop10Accuracy=0.761, over 9348.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7441, over 11680.49 frames. ], batch size: 11, lr: 7.07e-03 +2024-08-06 06:08:16,384 INFO [trainer.py:765] (1/8) Epoch 17, batch 900, train_loss[loss=2.798, ArTop10Accuracy=0.7556, over 12914.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7459, over 11743.50 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,995 INFO [trainer.py:765] (1/8) Epoch 17, batch 1000, train_loss[loss=2.76, ArTop10Accuracy=0.76, over 13013.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7452, over 11941.09 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (1/8) Epoch 17, batch 1100, train_loss[loss=2.882, ArTop10Accuracy=0.7352, over 13801.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7428, over 12001.59 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,445 INFO [trainer.py:765] (1/8) Epoch 17, batch 1200, train_loss[loss=2.963, ArTop10Accuracy=0.7218, over 12260.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7427, over 11943.38 frames. ], batch size: 97, lr: 7.01e-03 +2024-08-06 06:10:15,027 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:11:23,102 INFO [trainer.py:765] (1/8) Epoch 18, batch 100, train_loss[loss=2.965, ArTop10Accuracy=0.7235, over 14660.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7508, over 4790.75 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (1/8) Epoch 18, batch 200, train_loss[loss=2.711, ArTop10Accuracy=0.7669, over 13790.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7517, over 7796.93 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,317 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,115 INFO [trainer.py:765] (1/8) Epoch 18, batch 300, train_loss[loss=2.858, ArTop10Accuracy=0.7411, over 14602.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7514, over 9444.02 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,097 INFO [trainer.py:765] (1/8) Epoch 18, batch 400, train_loss[loss=2.723, ArTop10Accuracy=0.7649, over 10493.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7511, over 10353.84 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,487 INFO [trainer.py:765] (1/8) Epoch 18, batch 500, train_loss[loss=2.816, ArTop10Accuracy=0.7537, over 12205.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7506, over 10904.51 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,627 INFO [trainer.py:765] (1/8) Epoch 18, batch 600, train_loss[loss=2.738, ArTop10Accuracy=0.7657, over 11528.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7495, over 11416.40 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,342 INFO [trainer.py:765] (1/8) Epoch 18, batch 700, train_loss[loss=2.747, ArTop10Accuracy=0.7664, over 10033.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.747, over 11580.43 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,427 INFO [trainer.py:765] (1/8) Epoch 18, batch 800, train_loss[loss=2.85, ArTop10Accuracy=0.7552, over 9297.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7458, over 11673.28 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 06:17:22,912 INFO [trainer.py:765] (1/8) Epoch 18, batch 900, train_loss[loss=2.729, ArTop10Accuracy=0.7666, over 12886.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7482, over 11720.71 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,528 INFO [trainer.py:765] (1/8) Epoch 18, batch 1000, train_loss[loss=2.824, ArTop10Accuracy=0.7476, over 12904.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7477, over 11932.81 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,662 INFO [trainer.py:765] (1/8) Epoch 18, batch 1100, train_loss[loss=2.767, ArTop10Accuracy=0.7569, over 13569.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.746, over 11989.03 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (1/8) Epoch 18, batch 1200, train_loss[loss=2.946, ArTop10Accuracy=0.7211, over 11982.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7452, over 11972.69 frames. ], batch size: 97, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,696 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:20:29,728 INFO [trainer.py:765] (1/8) Epoch 19, batch 100, train_loss[loss=2.858, ArTop10Accuracy=0.7397, over 14846.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7512, over 4786.28 frames. 
], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,274 INFO [trainer.py:765] (1/8) Epoch 19, batch 200, train_loss[loss=2.735, ArTop10Accuracy=0.7636, over 13973.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7534, over 7791.32 frames. ], batch size: 35, lr: 6.41e-03 +2024-08-06 06:21:56,078 INFO [trainer.py:765] (1/8) Epoch 19, batch 300, train_loss[loss=2.783, ArTop10Accuracy=0.7603, over 14131.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.752, over 9428.20 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (1/8) Epoch 19, batch 400, train_loss[loss=2.814, ArTop10Accuracy=0.7476, over 10290.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7524, over 10336.07 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 06:23:18,997 INFO [trainer.py:765] (1/8) Epoch 19, batch 500, train_loss[loss=2.787, ArTop10Accuracy=0.7602, over 12204.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7527, over 10904.92 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (1/8) Epoch 19, batch 600, train_loss[loss=2.633, ArTop10Accuracy=0.7812, over 11618.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7514, over 11425.50 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,185 INFO [trainer.py:765] (1/8) Epoch 19, batch 700, train_loss[loss=2.762, ArTop10Accuracy=0.756, over 9400.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7503, over 11575.14 frames. ], batch size: 11, lr: 6.35e-03 +2024-08-06 06:25:22,355 INFO [trainer.py:765] (1/8) Epoch 19, batch 800, train_loss[loss=2.787, ArTop10Accuracy=0.7534, over 10182.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7486, over 11704.71 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,624 INFO [trainer.py:765] (1/8) Epoch 19, batch 900, train_loss[loss=2.798, ArTop10Accuracy=0.7465, over 12857.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.749, over 11749.94 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,772 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (1/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30932MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,030 INFO [trainer.py:765] (1/8) Epoch 19, batch 1000, train_loss[loss=2.831, ArTop10Accuracy=0.7502, over 12917.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7485, over 11946.58 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (1/8) Epoch 19, batch 1100, train_loss[loss=2.865, ArTop10Accuracy=0.7391, over 13739.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7469, over 12006.14 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,454 INFO [trainer.py:765] (1/8) Epoch 19, batch 1200, train_loss[loss=2.875, ArTop10Accuracy=0.7384, over 12216.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7467, over 11964.15 frames. ], batch size: 98, lr: 6.28e-03 +2024-08-06 06:28:00,649 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:29:08,985 INFO [trainer.py:765] (1/8) Epoch 20, batch 100, train_loss[loss=2.812, ArTop10Accuracy=0.7492, over 14594.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7537, over 4778.56 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (1/8) Epoch 20, batch 200, train_loss[loss=2.711, ArTop10Accuracy=0.761, over 13932.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7537, over 7787.63 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,106 INFO [trainer.py:765] (1/8) Epoch 20, batch 300, train_loss[loss=2.765, ArTop10Accuracy=0.7561, over 14305.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7542, over 9419.91 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,354 INFO [trainer.py:765] (1/8) Epoch 20, batch 400, train_loss[loss=2.805, ArTop10Accuracy=0.7542, over 10359.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7546, over 10335.06 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (1/8) Epoch 20, batch 500, train_loss[loss=2.784, ArTop10Accuracy=0.7469, over 12345.00 frames. ], tot_loss[loss=2.786, ArTop10Accuracy=0.7548, over 10906.17 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,357 INFO [trainer.py:765] (1/8) Epoch 20, batch 600, train_loss[loss=2.663, ArTop10Accuracy=0.7778, over 11678.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7537, over 11425.12 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,752 INFO [trainer.py:765] (1/8) Epoch 20, batch 700, train_loss[loss=2.749, ArTop10Accuracy=0.7658, over 10069.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.752, over 11566.64 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:43,829 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (1/8) Epoch 20, batch 800, train_loss[loss=2.735, ArTop10Accuracy=0.765, over 9984.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7511, over 11687.19 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (1/8) Epoch 20, batch 900, train_loss[loss=2.91, ArTop10Accuracy=0.7331, over 12961.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7511, over 11731.76 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (1/8) Epoch 20, batch 1000, train_loss[loss=2.738, ArTop10Accuracy=0.7649, over 13143.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7502, over 11926.52 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (1/8) Epoch 20, batch 1100, train_loss[loss=2.702, ArTop10Accuracy=0.7708, over 13670.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7488, over 11972.22 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (1/8) Epoch 20, batch 1200, train_loss[loss=2.978, ArTop10Accuracy=0.7192, over 12645.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7482, over 11919.35 frames. ], batch size: 98, lr: 5.97e-03 +2024-08-06 06:36:42,651 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:36:42,654 INFO [trainer.py:1069] (1/8) Done! 
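For reference, the per-batch progress lines in these logs follow one fixed format ("Epoch N, batch M, train_loss[loss=..., ArTop10Accuracy=..., over F frames. ], tot_loss[...], batch size: B, lr: L"), and the periodic validation lines follow another ("Epoch N, validation: loss=..., ArTop10Accuracy=..., over F frames."). Below is a minimal standalone sketch, not part of the icefall/VALL-E recipe, that parses those two line types so the loss and ArTop10Accuracy curves can be tabulated or plotted; the regexes simply mirror the format visible in these hunks, the script assumes one log record per physical line as in the original files, and every name in it (parse, PROGRESS, VALIDATION, the script filename used in the usage note) is a hypothetical choice, not something defined by the recipe.

import re
import sys

# Per-batch progress line, e.g.
#   ... Epoch 20, batch 100, train_loss[loss=2.812, ArTop10Accuracy=0.7492,
#   over 14594.00 frames. ], tot_loss[loss=2.794, ...], batch size: 61, lr: 6.10e-03
PROGRESS = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), ArTop10Accuracy=(?P<train_acc>[\d.]+),[^]]*\], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), ArTop10Accuracy=(?P<tot_acc>[\d.]+),[^]]*\], "
    r"batch size: (?P<batch_size>\d+), lr: (?P<lr>[\d.eE+-]+)"
)

# Validation line, e.g. "... Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over ..."
VALIDATION = re.compile(
    r"Epoch (?P<epoch>\d+), validation: "
    r"loss=(?P<loss>[\d.]+), ArTop10Accuracy=(?P<acc>[\d.]+)"
)

def parse(path):
    """Yield ('train'|'valid', dict) pairs, one per matched log line."""
    with open(path) as f:
        for line in f:
            m = PROGRESS.search(line)
            if m:
                yield "train", {
                    "epoch": int(m["epoch"]),
                    "batch": int(m["batch"]),
                    "train_loss": float(m["train_loss"]),
                    "train_ArTop10Accuracy": float(m["train_acc"]),
                    "tot_loss": float(m["tot_loss"]),
                    "tot_ArTop10Accuracy": float(m["tot_acc"]),
                    "batch_size": int(m["batch_size"]),
                    "lr": float(m["lr"]),
                }
                continue
            m = VALIDATION.search(line)
            if m:
                yield "valid", {
                    "epoch": int(m["epoch"]),
                    "loss": float(m["loss"]),
                    "ArTop10Accuracy": float(m["acc"]),
                }

if __name__ == "__main__":
    for kind, row in parse(sys.argv[1]):
        print(kind, row)

Run against one of the files in this diff (e.g. "python parse_valle_log.py libritts/log/log-train-2024-08-06-03-39-40-2"), it prints one row per logged batch plus one per validation pass, which is enough to see, for instance, that the rank-1 validation loss in the hunk above falls from 3.063 at epoch 4 to roughly 2.91 by epoch 15 and then flattens.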
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-2 b/libritts/log/log-train-2024-08-06-03-39-40-2 new file mode 100644 index 0000000000000000000000000000000000000000..7ad635733478efbdef2c90ef00d389099aebd065 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-2 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,318 INFO [trainer.py:870] (2/8) Training started +2024-08-06 03:39:40,319 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 03:39:40,319 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,319 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 03:39:41,079 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,905 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 03:39:43,993 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 03:39:43,995 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 03:39:43,997 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 03:39:43,997 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 03:39:43,998 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,608 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 03:39:44,608 INFO [datamodule.py:367] (2/8) About to create dev dataset +2024-08-06 03:39:44,934 INFO [datamodule.py:388] (2/8) About to create dev 
dataloader +2024-08-06 03:40:39,570 INFO [trainer.py:765] (2/8) Epoch 1, batch 100, train_loss[loss=4.238, ArTop10Accuracy=0.4902, over 14599.00 frames. ], tot_loss[loss=4.784, ArTop10Accuracy=0.3953, over 4788.28 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,922 INFO [trainer.py:765] (2/8) Epoch 1, batch 200, train_loss[loss=3.97, ArTop10Accuracy=0.5311, over 13617.00 frames. ], tot_loss[loss=4.305, ArTop10Accuracy=0.4754, over 7786.16 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,950 INFO [trainer.py:765] (2/8) Epoch 1, batch 300, train_loss[loss=3.828, ArTop10Accuracy=0.5518, over 14061.00 frames. ], tot_loss[loss=4.093, ArTop10Accuracy=0.5099, over 9425.13 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,080 INFO [trainer.py:765] (2/8) Epoch 1, batch 400, train_loss[loss=3.717, ArTop10Accuracy=0.5717, over 11056.00 frames. ], tot_loss[loss=3.942, ArTop10Accuracy=0.5348, over 10340.47 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 03:43:11,271 INFO [trainer.py:765] (2/8) Epoch 1, batch 500, train_loss[loss=3.542, ArTop10Accuracy=0.6033, over 12246.00 frames. ], tot_loss[loss=3.831, ArTop10Accuracy=0.5535, over 10902.49 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,593 INFO [trainer.py:765] (2/8) Epoch 1, batch 600, train_loss[loss=3.484, ArTop10Accuracy=0.6182, over 11327.00 frames. ], tot_loss[loss=3.746, ArTop10Accuracy=0.568, over 11421.62 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,899 INFO [trainer.py:765] (2/8) Epoch 1, batch 700, train_loss[loss=3.646, ArTop10Accuracy=0.5823, over 10104.00 frames. ], tot_loss[loss=3.685, ArTop10Accuracy=0.579, over 11567.62 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (2/8) Epoch 1, batch 800, train_loss[loss=3.438, ArTop10Accuracy=0.6215, over 10209.00 frames. ], tot_loss[loss=3.637, ArTop10Accuracy=0.5874, over 11679.20 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,558 INFO [trainer.py:765] (2/8) Epoch 1, batch 900, train_loss[loss=3.594, ArTop10Accuracy=0.5968, over 12892.00 frames. ], tot_loss[loss=3.583, ArTop10Accuracy=0.5973, over 11727.14 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,649 INFO [trainer.py:765] (2/8) Epoch 1, batch 1000, train_loss[loss=3.459, ArTop10Accuracy=0.62, over 12809.00 frames. ], tot_loss[loss=3.55, ArTop10Accuracy=0.6035, over 11929.18 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,612 INFO [trainer.py:765] (2/8) Epoch 1, batch 1100, train_loss[loss=3.54, ArTop10Accuracy=0.6066, over 13569.00 frames. ], tot_loss[loss=3.526, ArTop10Accuracy=0.6082, over 11994.07 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,745 INFO [trainer.py:765] (2/8) Epoch 1, batch 1200, train_loss[loss=3.542, ArTop10Accuracy=0.606, over 11052.00 frames. ], tot_loss[loss=3.496, ArTop10Accuracy=0.6136, over 11929.77 frames. ], batch size: 99, lr: 2.96e-02 +2024-08-06 03:47:33,759 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 03:48:38,677 INFO [trainer.py:765] (2/8) Epoch 2, batch 100, train_loss[loss=3.423, ArTop10Accuracy=0.6282, over 14455.00 frames. ], tot_loss[loss=3.441, ArTop10Accuracy=0.6243, over 4794.60 frames. 
], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,597 INFO [trainer.py:765] (2/8) Epoch 2, batch 200, train_loss[loss=3.527, ArTop10Accuracy=0.6072, over 13942.00 frames. ], tot_loss[loss=3.438, ArTop10Accuracy=0.6251, over 7807.13 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,520 INFO [trainer.py:765] (2/8) Epoch 2, batch 300, train_loss[loss=3.462, ArTop10Accuracy=0.6197, over 14367.00 frames. ], tot_loss[loss=3.426, ArTop10Accuracy=0.6271, over 9433.56 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:32,000 INFO [trainer.py:765] (2/8) Epoch 2, batch 400, train_loss[loss=3.259, ArTop10Accuracy=0.6613, over 10127.00 frames. ], tot_loss[loss=3.414, ArTop10Accuracy=0.6297, over 10340.39 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,110 INFO [trainer.py:765] (2/8) Epoch 2, batch 500, train_loss[loss=3.289, ArTop10Accuracy=0.6452, over 12078.00 frames. ], tot_loss[loss=3.407, ArTop10Accuracy=0.631, over 10899.71 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,206 INFO [trainer.py:765] (2/8) Epoch 2, batch 600, train_loss[loss=3.37, ArTop10Accuracy=0.6379, over 11569.00 frames. ], tot_loss[loss=3.397, ArTop10Accuracy=0.633, over 11439.27 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,994 INFO [trainer.py:765] (2/8) Epoch 2, batch 700, train_loss[loss=3.362, ArTop10Accuracy=0.6345, over 10167.00 frames. ], tot_loss[loss=3.396, ArTop10Accuracy=0.6331, over 11591.15 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,092 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (2/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 28804MB +2024-08-06 03:52:56,541 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,881 INFO [trainer.py:765] (2/8) Epoch 2, batch 800, train_loss[loss=3.249, ArTop10Accuracy=0.6537, over 10208.00 frames. ], tot_loss[loss=3.386, ArTop10Accuracy=0.6347, over 11706.94 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (2/8) Epoch 2, batch 900, train_loss[loss=3.354, ArTop10Accuracy=0.6334, over 12977.00 frames. ], tot_loss[loss=3.37, ArTop10Accuracy=0.638, over 11741.79 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,809 INFO [trainer.py:765] (2/8) Epoch 2, batch 1000, train_loss[loss=3.322, ArTop10Accuracy=0.6474, over 13032.00 frames. ], tot_loss[loss=3.363, ArTop10Accuracy=0.6394, over 11951.43 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,006 INFO [trainer.py:765] (2/8) Epoch 2, batch 1100, train_loss[loss=3.301, ArTop10Accuracy=0.6477, over 13736.00 frames. ], tot_loss[loss=3.362, ArTop10Accuracy=0.6397, over 11995.67 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,228 INFO [trainer.py:765] (2/8) Epoch 2, batch 1200, train_loss[loss=3.427, ArTop10Accuracy=0.632, over 12590.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.6413, over 11940.59 frames. ], batch size: 99, lr: 2.80e-02 +2024-08-06 03:55:51,132 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 03:57:04,102 INFO [trainer.py:765] (2/8) Epoch 3, batch 100, train_loss[loss=3.315, ArTop10Accuracy=0.6471, over 14358.00 frames. ], tot_loss[loss=3.315, ArTop10Accuracy=0.6487, over 4780.00 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,979 INFO [trainer.py:765] (2/8) Epoch 3, batch 200, train_loss[loss=3.234, ArTop10Accuracy=0.666, over 13654.00 frames. ], tot_loss[loss=3.293, ArTop10Accuracy=0.6534, over 7796.59 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,074 INFO [trainer.py:765] (2/8) Epoch 3, batch 300, train_loss[loss=3.247, ArTop10Accuracy=0.6639, over 14242.00 frames. ], tot_loss[loss=3.279, ArTop10Accuracy=0.6563, over 9417.80 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,253 INFO [trainer.py:765] (2/8) Epoch 3, batch 400, train_loss[loss=3.091, ArTop10Accuracy=0.6961, over 10417.00 frames. ], tot_loss[loss=3.26, ArTop10Accuracy=0.6596, over 10337.38 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,674 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,303 INFO [trainer.py:765] (2/8) Epoch 3, batch 500, train_loss[loss=3.186, ArTop10Accuracy=0.6673, over 12323.00 frames. ], tot_loss[loss=3.249, ArTop10Accuracy=0.6617, over 10910.34 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,095 INFO [trainer.py:765] (2/8) Epoch 3, batch 600, train_loss[loss=3.144, ArTop10Accuracy=0.6824, over 11789.00 frames. ], tot_loss[loss=3.23, ArTop10Accuracy=0.6655, over 11415.53 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,060 INFO [trainer.py:765] (2/8) Epoch 3, batch 700, train_loss[loss=3.153, ArTop10Accuracy=0.6802, over 10131.00 frames. ], tot_loss[loss=3.226, ArTop10Accuracy=0.6665, over 11576.97 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,270 INFO [trainer.py:765] (2/8) Epoch 3, batch 800, train_loss[loss=2.947, ArTop10Accuracy=0.7092, over 10105.00 frames. ], tot_loss[loss=3.216, ArTop10Accuracy=0.6688, over 11691.18 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,741 INFO [trainer.py:765] (2/8) Epoch 3, batch 900, train_loss[loss=3.278, ArTop10Accuracy=0.6582, over 12969.00 frames. ], tot_loss[loss=3.199, ArTop10Accuracy=0.6721, over 11735.44 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,284 INFO [trainer.py:765] (2/8) Epoch 3, batch 1000, train_loss[loss=3.106, ArTop10Accuracy=0.6993, over 13160.00 frames. ], tot_loss[loss=3.195, ArTop10Accuracy=0.6727, over 11942.81 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (2/8) Epoch 3, batch 1100, train_loss[loss=3.136, ArTop10Accuracy=0.6876, over 13802.00 frames. ], tot_loss[loss=3.193, ArTop10Accuracy=0.6733, over 11997.13 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,313 INFO [trainer.py:765] (2/8) Epoch 3, batch 1200, train_loss[loss=3.282, ArTop10Accuracy=0.6577, over 12318.00 frames. ], tot_loss[loss=3.183, ArTop10Accuracy=0.6751, over 11945.69 frames. ], batch size: 99, lr: 2.54e-02 +2024-08-06 04:04:26,694 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:05:43,369 INFO [trainer.py:765] (2/8) Epoch 4, batch 100, train_loss[loss=3.177, ArTop10Accuracy=0.6706, over 14302.00 frames. ], tot_loss[loss=3.14, ArTop10Accuracy=0.6842, over 4789.47 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,077 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,404 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,826 INFO [trainer.py:765] (2/8) Epoch 4, batch 200, train_loss[loss=3.072, ArTop10Accuracy=0.7041, over 13627.00 frames. ], tot_loss[loss=3.122, ArTop10Accuracy=0.6886, over 7782.21 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,544 INFO [trainer.py:765] (2/8) Epoch 4, batch 300, train_loss[loss=3.159, ArTop10Accuracy=0.6799, over 14517.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6894, over 9421.67 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (2/8) Epoch 4, batch 400, train_loss[loss=3.091, ArTop10Accuracy=0.6954, over 10874.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.69, over 10347.99 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,344 INFO [trainer.py:765] (2/8) Epoch 4, batch 500, train_loss[loss=3.166, ArTop10Accuracy=0.6733, over 12378.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6905, over 10907.40 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,071 INFO [trainer.py:765] (2/8) Epoch 4, batch 600, train_loss[loss=3.147, ArTop10Accuracy=0.6757, over 11511.00 frames. ], tot_loss[loss=3.11, ArTop10Accuracy=0.6894, over 11418.96 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,501 INFO [trainer.py:765] (2/8) Epoch 4, batch 700, train_loss[loss=3.185, ArTop10Accuracy=0.6861, over 10217.00 frames. ], tot_loss[loss=3.117, ArTop10Accuracy=0.6885, over 11566.09 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,959 INFO [trainer.py:765] (2/8) Epoch 4, batch 800, train_loss[loss=3.132, ArTop10Accuracy=0.689, over 10153.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6886, over 11678.64 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,331 INFO [trainer.py:765] (2/8) Epoch 4, batch 900, train_loss[loss=3.056, ArTop10Accuracy=0.701, over 13147.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6906, over 11728.67 frames. ], batch size: 28, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (2/8) Epoch 4, batch 1000, train_loss[loss=2.987, ArTop10Accuracy=0.7195, over 12903.00 frames. ], tot_loss[loss=3.102, ArTop10Accuracy=0.691, over 11945.47 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,959 INFO [trainer.py:765] (2/8) Epoch 4, batch 1100, train_loss[loss=3.082, ArTop10Accuracy=0.693, over 13680.00 frames. ], tot_loss[loss=3.107, ArTop10Accuracy=0.6902, over 11976.90 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,545 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,827 INFO [trainer.py:765] (2/8) Epoch 4, batch 1200, train_loss[loss=3.199, ArTop10Accuracy=0.675, over 11681.00 frames. ], tot_loss[loss=3.103, ArTop10Accuracy=0.6913, over 11926.91 frames. ], batch size: 98, lr: 2.25e-02 +2024-08-06 04:13:24,340 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (2/8) Epoch 5, batch 100, train_loss[loss=3.067, ArTop10Accuracy=0.6987, over 14655.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7007, over 4800.60 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (2/8) Epoch 5, batch 200, train_loss[loss=3.074, ArTop10Accuracy=0.7022, over 13615.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.7018, over 7801.70 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (2/8) Epoch 5, batch 300, train_loss[loss=3.081, ArTop10Accuracy=0.6943, over 14161.00 frames. ], tot_loss[loss=3.047, ArTop10Accuracy=0.7033, over 9412.98 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,134 INFO [trainer.py:765] (2/8) Epoch 5, batch 400, train_loss[loss=3.186, ArTop10Accuracy=0.6745, over 10281.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7019, over 10329.99 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (2/8) Epoch 5, batch 500, train_loss[loss=3.071, ArTop10Accuracy=0.6972, over 12362.00 frames. ], tot_loss[loss=3.048, ArTop10Accuracy=0.7025, over 10896.69 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (2/8) Epoch 5, batch 600, train_loss[loss=3.095, ArTop10Accuracy=0.6862, over 11747.00 frames. ], tot_loss[loss=3.048, ArTop10Accuracy=0.7022, over 11433.37 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,033 INFO [trainer.py:765] (2/8) Epoch 5, batch 700, train_loss[loss=2.958, ArTop10Accuracy=0.7146, over 10286.00 frames. ], tot_loss[loss=3.058, ArTop10Accuracy=0.7003, over 11570.08 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (2/8) Epoch 5, batch 800, train_loss[loss=3.108, ArTop10Accuracy=0.6899, over 10094.00 frames. ], tot_loss[loss=3.062, ArTop10Accuracy=0.6995, over 11685.00 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 04:20:27,476 INFO [trainer.py:811] (2/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (2/8) Epoch 5, batch 900, train_loss[loss=3.109, ArTop10Accuracy=0.6952, over 12959.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7008, over 11728.45 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (2/8) Epoch 5, batch 1000, train_loss[loss=3.045, ArTop10Accuracy=0.7074, over 12927.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7014, over 11930.08 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,451 INFO [trainer.py:765] (2/8) Epoch 5, batch 1100, train_loss[loss=3.074, ArTop10Accuracy=0.6958, over 13779.00 frames. ], tot_loss[loss=3.058, ArTop10Accuracy=0.7003, over 11984.06 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (2/8) Epoch 5, batch 1200, train_loss[loss=3.174, ArTop10Accuracy=0.6778, over 12273.00 frames. ], tot_loss[loss=3.057, ArTop10Accuracy=0.7005, over 11929.53 frames. ], batch size: 99, lr: 1.99e-02 +2024-08-06 04:22:30,397 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (2/8) Epoch 6, batch 100, train_loss[loss=3.099, ArTop10Accuracy=0.6947, over 14271.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.7069, over 4796.55 frames. 
], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,256 INFO [trainer.py:765] (2/8) Epoch 6, batch 200, train_loss[loss=3.011, ArTop10Accuracy=0.7076, over 13806.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7086, over 7806.97 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,676 INFO [trainer.py:765] (2/8) Epoch 6, batch 300, train_loss[loss=3.069, ArTop10Accuracy=0.6979, over 14163.00 frames. ], tot_loss[loss=3.018, ArTop10Accuracy=0.7089, over 9428.27 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (2/8) Epoch 6, batch 400, train_loss[loss=2.913, ArTop10Accuracy=0.7321, over 10243.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7096, over 10322.01 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 04:26:51,486 INFO [trainer.py:765] (2/8) Epoch 6, batch 500, train_loss[loss=3.093, ArTop10Accuracy=0.6874, over 12358.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.71, over 10891.35 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (2/8) Epoch 6, batch 600, train_loss[loss=2.981, ArTop10Accuracy=0.7169, over 11535.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7088, over 11414.81 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,370 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,239 INFO [trainer.py:765] (2/8) Epoch 6, batch 700, train_loss[loss=2.864, ArTop10Accuracy=0.7439, over 10079.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7075, over 11550.48 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (2/8) Epoch 6, batch 800, train_loss[loss=3.082, ArTop10Accuracy=0.6921, over 10204.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7065, over 11675.56 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,752 INFO [trainer.py:765] (2/8) Epoch 6, batch 900, train_loss[loss=3.021, ArTop10Accuracy=0.7036, over 12896.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7078, over 11744.03 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (2/8) Epoch 6, batch 1000, train_loss[loss=3.099, ArTop10Accuracy=0.6958, over 12892.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.7068, over 11954.61 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,384 INFO [trainer.py:765] (2/8) Epoch 6, batch 1100, train_loss[loss=3.054, ArTop10Accuracy=0.7074, over 13657.00 frames. ], tot_loss[loss=3.031, ArTop10Accuracy=0.7058, over 11989.60 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (2/8) Epoch 6, batch 1200, train_loss[loss=3.197, ArTop10Accuracy=0.6765, over 11611.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7065, over 11928.69 frames. ], batch size: 98, lr: 1.76e-02 +2024-08-06 04:31:40,624 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:32:52,405 INFO [trainer.py:765] (2/8) Epoch 7, batch 100, train_loss[loss=3.098, ArTop10Accuracy=0.6977, over 14447.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7144, over 4767.78 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,224 INFO [trainer.py:765] (2/8) Epoch 7, batch 200, train_loss[loss=3.006, ArTop10Accuracy=0.7124, over 13782.00 frames. ], tot_loss[loss=2.984, ArTop10Accuracy=0.7154, over 7764.47 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (2/8) Epoch 7, batch 300, train_loss[loss=3.026, ArTop10Accuracy=0.7082, over 14388.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7148, over 9405.42 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,848 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 04:34:45,809 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,810 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 04:34:46,125 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,147 INFO [trainer.py:765] (2/8) Epoch 7, batch 400, train_loss[loss=3.029, ArTop10Accuracy=0.7126, over 10839.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7148, over 10324.61 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (2/8) Epoch 7, batch 500, train_loss[loss=2.873, ArTop10Accuracy=0.7396, over 12169.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7146, over 10888.61 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,812 INFO [trainer.py:765] (2/8) Epoch 7, batch 600, train_loss[loss=2.969, ArTop10Accuracy=0.7169, over 11577.00 frames. ], tot_loss[loss=2.989, ArTop10Accuracy=0.7139, over 11406.76 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (2/8) Epoch 7, batch 700, train_loss[loss=2.885, ArTop10Accuracy=0.7319, over 10136.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7127, over 11568.04 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,614 INFO [trainer.py:765] (2/8) Epoch 7, batch 800, train_loss[loss=2.811, ArTop10Accuracy=0.7477, over 10693.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.712, over 11681.76 frames. ], batch size: 13, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (2/8) Epoch 7, batch 900, train_loss[loss=3.053, ArTop10Accuracy=0.703, over 13248.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7138, over 11741.89 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,575 INFO [trainer.py:765] (2/8) Epoch 7, batch 1000, train_loss[loss=2.957, ArTop10Accuracy=0.7173, over 12975.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7137, over 11959.74 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (2/8) Epoch 7, batch 1100, train_loss[loss=3.027, ArTop10Accuracy=0.7077, over 13800.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.712, over 12002.90 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,989 INFO [trainer.py:765] (2/8) Epoch 7, batch 1200, train_loss[loss=3.136, ArTop10Accuracy=0.6861, over 12887.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7127, over 11943.73 frames. ], batch size: 99, lr: 1.57e-02 +2024-08-06 04:40:43,363 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,371 INFO [trainer.py:765] (2/8) Epoch 8, batch 100, train_loss[loss=3.046, ArTop10Accuracy=0.7047, over 14552.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7182, over 4792.34 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (2/8) Epoch 8, batch 200, train_loss[loss=2.974, ArTop10Accuracy=0.7225, over 13235.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7225, over 7789.63 frames. ], batch size: 33, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (2/8) Epoch 8, batch 300, train_loss[loss=3.041, ArTop10Accuracy=0.7051, over 14321.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.7227, over 9418.52 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,461 INFO [trainer.py:765] (2/8) Epoch 8, batch 400, train_loss[loss=3.094, ArTop10Accuracy=0.7061, over 10312.00 frames. ], tot_loss[loss=2.954, ArTop10Accuracy=0.7218, over 10342.89 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (2/8) Epoch 8, batch 500, train_loss[loss=2.953, ArTop10Accuracy=0.7192, over 12418.00 frames. ], tot_loss[loss=2.951, ArTop10Accuracy=0.722, over 10914.11 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (2/8) Epoch 8, batch 600, train_loss[loss=2.951, ArTop10Accuracy=0.7179, over 11643.00 frames. ], tot_loss[loss=2.959, ArTop10Accuracy=0.7201, over 11430.71 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,037 INFO [trainer.py:765] (2/8) Epoch 8, batch 700, train_loss[loss=2.88, ArTop10Accuracy=0.7407, over 10089.00 frames. ], tot_loss[loss=2.97, ArTop10Accuracy=0.7179, over 11557.64 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,207 INFO [trainer.py:765] (2/8) Epoch 8, batch 800, train_loss[loss=2.942, ArTop10Accuracy=0.7197, over 9999.00 frames. ], tot_loss[loss=2.973, ArTop10Accuracy=0.7171, over 11684.33 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,605 INFO [trainer.py:765] (2/8) Epoch 8, batch 900, train_loss[loss=2.918, ArTop10Accuracy=0.7281, over 13035.00 frames. ], tot_loss[loss=2.969, ArTop10Accuracy=0.7179, over 11727.15 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,033 INFO [trainer.py:765] (2/8) Epoch 8, batch 1000, train_loss[loss=2.956, ArTop10Accuracy=0.7221, over 12840.00 frames. ], tot_loss[loss=2.97, ArTop10Accuracy=0.7178, over 11936.16 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,828 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (2/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,931 INFO [trainer.py:765] (2/8) Epoch 8, batch 1100, train_loss[loss=2.905, ArTop10Accuracy=0.7247, over 13471.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7167, over 11991.47 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (2/8) Epoch 8, batch 1200, train_loss[loss=3.151, ArTop10Accuracy=0.6828, over 12836.00 frames. ], tot_loss[loss=2.978, ArTop10Accuracy=0.7161, over 11952.83 frames. ], batch size: 97, lr: 1.40e-02 +2024-08-06 04:49:48,393 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:51:01,547 INFO [trainer.py:765] (2/8) Epoch 9, batch 100, train_loss[loss=3.072, ArTop10Accuracy=0.6984, over 14479.00 frames. ], tot_loss[loss=2.952, ArTop10Accuracy=0.7219, over 4767.15 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (2/8) Epoch 9, batch 200, train_loss[loss=3.028, ArTop10Accuracy=0.7102, over 13817.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7243, over 7779.31 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (2/8) Epoch 9, batch 300, train_loss[loss=2.961, ArTop10Accuracy=0.7219, over 14264.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7249, over 9398.27 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (2/8) Epoch 9, batch 400, train_loss[loss=2.907, ArTop10Accuracy=0.7297, over 10737.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.725, over 10337.25 frames. ], batch size: 15, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (2/8) Epoch 9, batch 500, train_loss[loss=2.943, ArTop10Accuracy=0.7245, over 12110.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7254, over 10890.50 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (2/8) Epoch 9, batch 600, train_loss[loss=2.921, ArTop10Accuracy=0.7325, over 11728.00 frames. ], tot_loss[loss=2.941, ArTop10Accuracy=0.7234, over 11423.61 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (2/8) Epoch 9, batch 700, train_loss[loss=2.84, ArTop10Accuracy=0.7416, over 10190.00 frames. ], tot_loss[loss=2.948, ArTop10Accuracy=0.7222, over 11578.40 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,575 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,598 INFO [trainer.py:765] (2/8) Epoch 9, batch 800, train_loss[loss=2.872, ArTop10Accuracy=0.7408, over 10110.00 frames. ], tot_loss[loss=2.953, ArTop10Accuracy=0.7213, over 11698.31 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (2/8) Epoch 9, batch 900, train_loss[loss=2.936, ArTop10Accuracy=0.7295, over 13064.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.7222, over 11748.18 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (2/8) Epoch 9, batch 1000, train_loss[loss=2.958, ArTop10Accuracy=0.7276, over 13016.00 frames. ], tot_loss[loss=2.955, ArTop10Accuracy=0.7213, over 11944.72 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,657 INFO [trainer.py:765] (2/8) Epoch 9, batch 1100, train_loss[loss=3, ArTop10Accuracy=0.7154, over 13793.00 frames. ], tot_loss[loss=2.965, ArTop10Accuracy=0.7189, over 12005.74 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,093 INFO [trainer.py:765] (2/8) Epoch 9, batch 1200, train_loss[loss=3.099, ArTop10Accuracy=0.6887, over 12366.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7196, over 11971.89 frames. ], batch size: 98, lr: 1.27e-02 +2024-08-06 04:58:43,379 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 04:59:52,750 INFO [trainer.py:765] (2/8) Epoch 10, batch 100, train_loss[loss=2.978, ArTop10Accuracy=0.7164, over 14878.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7286, over 4780.23 frames. ], batch size: 63, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (2/8) Epoch 10, batch 200, train_loss[loss=2.88, ArTop10Accuracy=0.7405, over 13887.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7293, over 7803.44 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (2/8) Epoch 10, batch 300, train_loss[loss=2.964, ArTop10Accuracy=0.7188, over 14173.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7301, over 9425.87 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (2/8) Epoch 10, batch 400, train_loss[loss=2.955, ArTop10Accuracy=0.7291, over 10429.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7295, over 10335.80 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 05:02:46,488 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 05:02:55,377 INFO [trainer.py:811] (2/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (2/8) Epoch 10, batch 500, train_loss[loss=2.875, ArTop10Accuracy=0.7371, over 12161.00 frames. ], tot_loss[loss=2.915, ArTop10Accuracy=0.7288, over 10893.26 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (2/8) Epoch 10, batch 600, train_loss[loss=2.871, ArTop10Accuracy=0.7365, over 11569.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7282, over 11428.81 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (2/8) Epoch 10, batch 700, train_loss[loss=2.612, ArTop10Accuracy=0.7818, over 10102.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.7254, over 11580.42 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,726 INFO [trainer.py:765] (2/8) Epoch 10, batch 800, train_loss[loss=2.908, ArTop10Accuracy=0.7233, over 10210.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7238, over 11684.13 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (2/8) Epoch 10, batch 900, train_loss[loss=2.944, ArTop10Accuracy=0.7194, over 12966.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7258, over 11742.25 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,844 INFO [trainer.py:765] (2/8) Epoch 10, batch 1000, train_loss[loss=2.983, ArTop10Accuracy=0.7219, over 12821.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7266, over 11939.45 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (2/8) Epoch 10, batch 1100, train_loss[loss=3.068, ArTop10Accuracy=0.7031, over 13737.00 frames. ], tot_loss[loss=2.941, ArTop10Accuracy=0.7238, over 12009.40 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,484 INFO [trainer.py:765] (2/8) Epoch 10, batch 1200, train_loss[loss=3.01, ArTop10Accuracy=0.7078, over 12358.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.724, over 11950.99 frames. ], batch size: 97, lr: 1.16e-02 +2024-08-06 05:07:40,243 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:08:52,966 INFO [trainer.py:765] (2/8) Epoch 11, batch 100, train_loss[loss=2.925, ArTop10Accuracy=0.7288, over 14495.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7314, over 4770.40 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,277 INFO [trainer.py:765] (2/8) Epoch 11, batch 200, train_loss[loss=2.929, ArTop10Accuracy=0.7319, over 13841.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7314, over 7772.06 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,720 INFO [trainer.py:765] (2/8) Epoch 11, batch 300, train_loss[loss=2.998, ArTop10Accuracy=0.7166, over 14187.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7321, over 9389.76 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (2/8) Epoch 11, batch 400, train_loss[loss=2.766, ArTop10Accuracy=0.7571, over 10857.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7322, over 10329.19 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (2/8) Epoch 11, batch 500, train_loss[loss=2.9, ArTop10Accuracy=0.7279, over 12227.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7313, over 10891.57 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,287 INFO [trainer.py:765] (2/8) Epoch 11, batch 600, train_loss[loss=2.773, ArTop10Accuracy=0.7548, over 11681.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7303, over 11432.08 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,708 INFO [trainer.py:765] (2/8) Epoch 11, batch 700, train_loss[loss=2.781, ArTop10Accuracy=0.7469, over 10182.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7287, over 11569.39 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (2/8) Epoch 11, batch 800, train_loss[loss=2.793, ArTop10Accuracy=0.7451, over 10069.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.728, over 11681.47 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,666 INFO [trainer.py:765] (2/8) Epoch 11, batch 900, train_loss[loss=2.867, ArTop10Accuracy=0.7368, over 12934.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7294, over 11728.60 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,263 INFO [trainer.py:765] (2/8) Epoch 11, batch 1000, train_loss[loss=2.997, ArTop10Accuracy=0.7159, over 13352.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7291, over 11927.60 frames. ], batch size: 28, lr: 1.07e-02 +2024-08-06 05:15:38,259 INFO [trainer.py:765] (2/8) Epoch 11, batch 1100, train_loss[loss=2.882, ArTop10Accuracy=0.732, over 13622.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7277, over 11984.60 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,497 INFO [trainer.py:765] (2/8) Epoch 11, batch 1200, train_loss[loss=3.096, ArTop10Accuracy=0.693, over 12237.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7274, over 11936.43 frames. ], batch size: 98, lr: 1.06e-02 +2024-08-06 05:16:12,697 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 05:16:21,622 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,805 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:18:03,006 INFO [trainer.py:765] (2/8) Epoch 12, batch 100, train_loss[loss=2.985, ArTop10Accuracy=0.7173, over 14457.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7351, over 4751.58 frames. 
], batch size: 62, lr: 1.01e-02 +2024-08-06 05:18:46,005 INFO [trainer.py:765] (2/8) Epoch 12, batch 200, train_loss[loss=2.876, ArTop10Accuracy=0.7382, over 13936.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7367, over 7778.74 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,947 INFO [trainer.py:765] (2/8) Epoch 12, batch 300, train_loss[loss=2.918, ArTop10Accuracy=0.7335, over 13852.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7374, over 9395.43 frames. ], batch size: 43, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (2/8) Epoch 12, batch 400, train_loss[loss=2.814, ArTop10Accuracy=0.7557, over 10314.00 frames. ], tot_loss[loss=2.882, ArTop10Accuracy=0.736, over 10309.33 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (2/8) Epoch 12, batch 500, train_loss[loss=2.952, ArTop10Accuracy=0.7228, over 12138.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7356, over 10884.38 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,916 INFO [trainer.py:765] (2/8) Epoch 12, batch 600, train_loss[loss=2.802, ArTop10Accuracy=0.7451, over 11621.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.734, over 11423.38 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,206 INFO [trainer.py:765] (2/8) Epoch 12, batch 700, train_loss[loss=2.767, ArTop10Accuracy=0.7568, over 9980.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7326, over 11570.51 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,912 INFO [trainer.py:765] (2/8) Epoch 12, batch 800, train_loss[loss=2.875, ArTop10Accuracy=0.7385, over 10064.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.731, over 11669.44 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (2/8) Epoch 12, batch 900, train_loss[loss=2.835, ArTop10Accuracy=0.7463, over 13139.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7327, over 11730.86 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 05:23:54,576 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,346 INFO [trainer.py:765] (2/8) Epoch 12, batch 1000, train_loss[loss=2.817, ArTop10Accuracy=0.7436, over 13009.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7322, over 11941.24 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,502 INFO [trainer.py:765] (2/8) Epoch 12, batch 1100, train_loss[loss=2.936, ArTop10Accuracy=0.7239, over 13711.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7308, over 12005.16 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (2/8) Epoch 12, batch 1200, train_loss[loss=3.01, ArTop10Accuracy=0.7104, over 12131.00 frames. ], tot_loss[loss=2.907, ArTop10Accuracy=0.7307, over 11944.94 frames. ], batch size: 98, lr: 9.78e-03 +2024-08-06 05:25:41,436 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:26:46,787 INFO [trainer.py:765] (2/8) Epoch 13, batch 100, train_loss[loss=2.932, ArTop10Accuracy=0.7276, over 14692.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7377, over 4774.15 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,552 INFO [trainer.py:765] (2/8) Epoch 13, batch 200, train_loss[loss=2.878, ArTop10Accuracy=0.7415, over 13615.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7378, over 7798.34 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (2/8) Epoch 13, batch 300, train_loss[loss=2.861, ArTop10Accuracy=0.7421, over 14256.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7385, over 9419.42 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (2/8) Epoch 13, batch 400, train_loss[loss=2.646, ArTop10Accuracy=0.7744, over 10261.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7378, over 10316.57 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (2/8) Epoch 13, batch 500, train_loss[loss=2.773, ArTop10Accuracy=0.7575, over 12363.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.738, over 10885.74 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,247 INFO [trainer.py:765] (2/8) Epoch 13, batch 600, train_loss[loss=2.885, ArTop10Accuracy=0.7434, over 11525.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7367, over 11421.59 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (2/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33194MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (2/8) Epoch 13, batch 700, train_loss[loss=2.894, ArTop10Accuracy=0.7257, over 9882.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7352, over 11544.89 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (2/8) Epoch 13, batch 800, train_loss[loss=2.705, ArTop10Accuracy=0.7712, over 10220.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7348, over 11668.46 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,521 INFO [trainer.py:765] (2/8) Epoch 13, batch 900, train_loss[loss=2.834, ArTop10Accuracy=0.7448, over 12923.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7353, over 11731.17 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (2/8) Epoch 13, batch 1000, train_loss[loss=2.729, ArTop10Accuracy=0.7598, over 12946.00 frames. ], tot_loss[loss=2.883, ArTop10Accuracy=0.7352, over 11939.27 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,233 INFO [trainer.py:765] (2/8) Epoch 13, batch 1100, train_loss[loss=2.955, ArTop10Accuracy=0.7218, over 13772.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7333, over 12016.87 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (2/8) Epoch 13, batch 1200, train_loss[loss=3.045, ArTop10Accuracy=0.7098, over 11902.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7331, over 11935.20 frames. ], batch size: 97, lr: 9.07e-03 +2024-08-06 05:34:29,356 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:35:39,198 INFO [trainer.py:765] (2/8) Epoch 14, batch 100, train_loss[loss=2.872, ArTop10Accuracy=0.7365, over 14687.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7403, over 4792.39 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,063 INFO [trainer.py:765] (2/8) Epoch 14, batch 200, train_loss[loss=2.867, ArTop10Accuracy=0.7411, over 13728.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7417, over 7798.85 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (2/8) Epoch 14, batch 300, train_loss[loss=2.913, ArTop10Accuracy=0.7299, over 14314.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7418, over 9424.44 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,030 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,139 INFO [trainer.py:765] (2/8) Epoch 14, batch 400, train_loss[loss=2.777, ArTop10Accuracy=0.7459, over 10801.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7419, over 10315.88 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (2/8) Epoch 14, batch 500, train_loss[loss=2.76, ArTop10Accuracy=0.7576, over 12396.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7416, over 10890.81 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,374 INFO [trainer.py:765] (2/8) Epoch 14, batch 600, train_loss[loss=2.873, ArTop10Accuracy=0.7326, over 11490.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7404, over 11418.29 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (2/8) Epoch 14, batch 700, train_loss[loss=2.907, ArTop10Accuracy=0.7269, over 10082.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.7385, over 11551.12 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,136 INFO [trainer.py:765] (2/8) Epoch 14, batch 800, train_loss[loss=2.667, ArTop10Accuracy=0.7775, over 9850.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7373, over 11668.77 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,466 INFO [trainer.py:765] (2/8) Epoch 14, batch 900, train_loss[loss=2.876, ArTop10Accuracy=0.7369, over 12986.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7386, over 11723.01 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,995 INFO [trainer.py:765] (2/8) Epoch 14, batch 1000, train_loss[loss=2.83, ArTop10Accuracy=0.7426, over 13044.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7377, over 11933.16 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,217 INFO [trainer.py:765] (2/8) Epoch 14, batch 1100, train_loss[loss=2.842, ArTop10Accuracy=0.7439, over 13773.00 frames. ], tot_loss[loss=2.879, ArTop10Accuracy=0.7363, over 11999.74 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,549 INFO [trainer.py:765] (2/8) Epoch 14, batch 1200, train_loss[loss=3.028, ArTop10Accuracy=0.7076, over 12237.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7364, over 11949.06 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:19,085 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:44:28,572 INFO [trainer.py:765] (2/8) Epoch 15, batch 100, train_loss[loss=2.963, ArTop10Accuracy=0.7197, over 14826.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7422, over 4788.38 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 05:44:29,214 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 05:44:38,023 INFO [trainer.py:811] (2/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33246MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,185 INFO [trainer.py:765] (2/8) Epoch 15, batch 200, train_loss[loss=2.807, ArTop10Accuracy=0.7537, over 13710.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7439, over 7805.06 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (2/8) Epoch 15, batch 300, train_loss[loss=2.942, ArTop10Accuracy=0.7258, over 14508.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7439, over 9429.96 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (2/8) Epoch 15, batch 400, train_loss[loss=2.655, ArTop10Accuracy=0.7743, over 10217.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7438, over 10322.02 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (2/8) Epoch 15, batch 500, train_loss[loss=2.736, ArTop10Accuracy=0.7632, over 12303.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7434, over 10881.57 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (2/8) Epoch 15, batch 600, train_loss[loss=2.761, ArTop10Accuracy=0.7587, over 11526.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7425, over 11419.26 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,856 INFO [trainer.py:765] (2/8) Epoch 15, batch 700, train_loss[loss=2.846, ArTop10Accuracy=0.7335, over 10062.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7405, over 11564.98 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,779 INFO [trainer.py:765] (2/8) Epoch 15, batch 800, train_loss[loss=2.925, ArTop10Accuracy=0.7188, over 9224.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7391, over 11660.66 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (2/8) Epoch 15, batch 900, train_loss[loss=3.033, ArTop10Accuracy=0.7154, over 13057.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7409, over 11730.28 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,830 INFO [trainer.py:765] (2/8) Epoch 15, batch 1000, train_loss[loss=2.834, ArTop10Accuracy=0.7484, over 12972.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7404, over 11937.53 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,070 INFO [trainer.py:765] (2/8) Epoch 15, batch 1100, train_loss[loss=2.899, ArTop10Accuracy=0.7337, over 13832.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7386, over 11992.96 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (2/8) Epoch 15, batch 1200, train_loss[loss=2.971, ArTop10Accuracy=0.7191, over 12900.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.738, over 11941.95 frames. ], batch size: 99, lr: 7.91e-03 +2024-08-06 05:52:18,086 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (2/8) Epoch 16, batch 100, train_loss[loss=2.894, ArTop10Accuracy=0.7377, over 14427.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7468, over 4785.00 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,877 INFO [trainer.py:765] (2/8) Epoch 16, batch 200, train_loss[loss=2.831, ArTop10Accuracy=0.7462, over 13780.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7467, over 7781.14 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (2/8) Epoch 16, batch 300, train_loss[loss=2.906, ArTop10Accuracy=0.7335, over 14155.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7468, over 9420.48 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,930 INFO [trainer.py:765] (2/8) Epoch 16, batch 400, train_loss[loss=2.686, ArTop10Accuracy=0.7733, over 10961.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7466, over 10323.32 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (2/8) Epoch 16, batch 500, train_loss[loss=2.809, ArTop10Accuracy=0.7594, over 12164.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7463, over 10905.04 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,439 INFO [trainer.py:765] (2/8) Epoch 16, batch 600, train_loss[loss=2.71, ArTop10Accuracy=0.7724, over 11487.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7452, over 11438.14 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (2/8) Epoch 16, batch 700, train_loss[loss=2.843, ArTop10Accuracy=0.7465, over 9292.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7441, over 11558.69 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (2/8) Epoch 16, batch 800, train_loss[loss=2.79, ArTop10Accuracy=0.7507, over 10225.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7423, over 11679.46 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,569 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (2/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33340MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (2/8) Epoch 16, batch 900, train_loss[loss=2.791, ArTop10Accuracy=0.7515, over 12798.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7429, over 11723.23 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (2/8) Epoch 16, batch 1000, train_loss[loss=2.776, ArTop10Accuracy=0.7557, over 12832.00 frames. ], tot_loss[loss=2.85, ArTop10Accuracy=0.7421, over 11918.58 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,091 INFO [trainer.py:765] (2/8) Epoch 16, batch 1100, train_loss[loss=2.883, ArTop10Accuracy=0.7371, over 13757.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7403, over 11985.14 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,464 INFO [trainer.py:765] (2/8) Epoch 16, batch 1200, train_loss[loss=3.013, ArTop10Accuracy=0.7111, over 12027.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7408, over 11942.83 frames. ], batch size: 97, lr: 7.43e-03 +2024-08-06 06:01:12,361 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:02:27,261 INFO [trainer.py:765] (2/8) Epoch 17, batch 100, train_loss[loss=2.949, ArTop10Accuracy=0.7286, over 14418.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7461, over 4798.04 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,850 INFO [trainer.py:765] (2/8) Epoch 17, batch 200, train_loss[loss=2.77, ArTop10Accuracy=0.7507, over 13471.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.746, over 7806.93 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (2/8) Epoch 17, batch 300, train_loss[loss=2.929, ArTop10Accuracy=0.7311, over 14126.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.748, over 9433.07 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,838 INFO [trainer.py:765] (2/8) Epoch 17, batch 400, train_loss[loss=2.643, ArTop10Accuracy=0.7722, over 10347.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7477, over 10349.35 frames. ], batch size: 14, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (2/8) Epoch 17, batch 500, train_loss[loss=2.906, ArTop10Accuracy=0.7349, over 12205.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7484, over 10923.35 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,551 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (2/8) Epoch 17, batch 600, train_loss[loss=2.804, ArTop10Accuracy=0.7508, over 11660.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7461, over 11452.61 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,695 INFO [trainer.py:765] (2/8) Epoch 17, batch 700, train_loss[loss=2.703, ArTop10Accuracy=0.7668, over 10055.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7458, over 11584.29 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (2/8) Epoch 17, batch 800, train_loss[loss=2.682, ArTop10Accuracy=0.7718, over 9902.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7453, over 11687.60 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,384 INFO [trainer.py:765] (2/8) Epoch 17, batch 900, train_loss[loss=2.868, ArTop10Accuracy=0.7369, over 13122.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7456, over 11742.05 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,995 INFO [trainer.py:765] (2/8) Epoch 17, batch 1000, train_loss[loss=2.71, ArTop10Accuracy=0.7683, over 12741.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7443, over 11942.29 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (2/8) Epoch 17, batch 1100, train_loss[loss=2.813, ArTop10Accuracy=0.7484, over 13705.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7428, over 11995.99 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,446 INFO [trainer.py:765] (2/8) Epoch 17, batch 1200, train_loss[loss=2.972, ArTop10Accuracy=0.7206, over 12667.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7434, over 11937.41 frames. ], batch size: 100, lr: 7.01e-03 +2024-08-06 06:10:15,269 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:11:23,101 INFO [trainer.py:765] (2/8) Epoch 18, batch 100, train_loss[loss=2.985, ArTop10Accuracy=0.7228, over 14784.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7484, over 4797.01 frames. ], batch size: 62, lr: 6.78e-03 +2024-08-06 06:12:16,259 INFO [trainer.py:765] (2/8) Epoch 18, batch 200, train_loss[loss=2.809, ArTop10Accuracy=0.7482, over 13618.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7491, over 7816.57 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,317 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33340MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (2/8) Epoch 18, batch 300, train_loss[loss=2.862, ArTop10Accuracy=0.747, over 14437.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7507, over 9441.11 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,098 INFO [trainer.py:765] (2/8) Epoch 18, batch 400, train_loss[loss=2.705, ArTop10Accuracy=0.7705, over 10374.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7506, over 10324.79 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (2/8) Epoch 18, batch 500, train_loss[loss=2.831, ArTop10Accuracy=0.7445, over 12176.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7513, over 10888.06 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (2/8) Epoch 18, batch 600, train_loss[loss=2.787, ArTop10Accuracy=0.7518, over 12008.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7496, over 11428.27 frames. ], batch size: 19, lr: 6.71e-03 +2024-08-06 06:16:17,342 INFO [trainer.py:765] (2/8) Epoch 18, batch 700, train_loss[loss=2.535, ArTop10Accuracy=0.7898, over 10077.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7477, over 11595.39 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (2/8) Epoch 18, batch 800, train_loss[loss=2.81, ArTop10Accuracy=0.7513, over 10196.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7458, over 11703.66 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,913 INFO [trainer.py:765] (2/8) Epoch 18, batch 900, train_loss[loss=2.845, ArTop10Accuracy=0.7459, over 12829.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7472, over 11742.87 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,529 INFO [trainer.py:765] (2/8) Epoch 18, batch 1000, train_loss[loss=2.83, ArTop10Accuracy=0.7482, over 12975.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7459, over 11949.79 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,664 INFO [trainer.py:765] (2/8) Epoch 18, batch 1100, train_loss[loss=2.837, ArTop10Accuracy=0.7422, over 13645.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.744, over 12011.69 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,972 INFO [trainer.py:765] (2/8) Epoch 18, batch 1200, train_loss[loss=3.045, ArTop10Accuracy=0.6986, over 11883.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7439, over 11955.71 frames. ], batch size: 97, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,732 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:20:29,727 INFO [trainer.py:765] (2/8) Epoch 19, batch 100, train_loss[loss=2.794, ArTop10Accuracy=0.7559, over 14034.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7529, over 4769.06 frames. 
], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,274 INFO [trainer.py:765] (2/8) Epoch 19, batch 200, train_loss[loss=2.774, ArTop10Accuracy=0.7534, over 13748.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.752, over 7787.60 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 06:21:56,077 INFO [trainer.py:765] (2/8) Epoch 19, batch 300, train_loss[loss=2.935, ArTop10Accuracy=0.7293, over 14360.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7514, over 9416.01 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,014 INFO [trainer.py:765] (2/8) Epoch 19, batch 400, train_loss[loss=2.635, ArTop10Accuracy=0.7794, over 10369.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.752, over 10332.28 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 06:23:18,997 INFO [trainer.py:765] (2/8) Epoch 19, batch 500, train_loss[loss=2.699, ArTop10Accuracy=0.774, over 12306.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7526, over 10926.80 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (2/8) Epoch 19, batch 600, train_loss[loss=2.775, ArTop10Accuracy=0.7611, over 11535.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7513, over 11443.41 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,184 INFO [trainer.py:765] (2/8) Epoch 19, batch 700, train_loss[loss=2.588, ArTop10Accuracy=0.7913, over 10357.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7502, over 11593.46 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,355 INFO [trainer.py:765] (2/8) Epoch 19, batch 800, train_loss[loss=2.72, ArTop10Accuracy=0.7633, over 10066.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.749, over 11715.86 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,624 INFO [trainer.py:765] (2/8) Epoch 19, batch 900, train_loss[loss=2.68, ArTop10Accuracy=0.7632, over 13113.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7503, over 11748.63 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,772 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (2/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 33340MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,032 INFO [trainer.py:765] (2/8) Epoch 19, batch 1000, train_loss[loss=2.78, ArTop10Accuracy=0.7612, over 12888.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7487, over 11942.50 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (2/8) Epoch 19, batch 1100, train_loss[loss=2.828, ArTop10Accuracy=0.7477, over 13837.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7468, over 11994.26 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,454 INFO [trainer.py:765] (2/8) Epoch 19, batch 1200, train_loss[loss=2.986, ArTop10Accuracy=0.7209, over 12040.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7469, over 11918.07 frames. ], batch size: 101, lr: 6.28e-03 +2024-08-06 06:28:00,542 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:29:08,984 INFO [trainer.py:765] (2/8) Epoch 20, batch 100, train_loss[loss=2.887, ArTop10Accuracy=0.7307, over 14720.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7544, over 4788.15 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (2/8) Epoch 20, batch 200, train_loss[loss=2.798, ArTop10Accuracy=0.7507, over 13550.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.755, over 7785.68 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,105 INFO [trainer.py:765] (2/8) Epoch 20, batch 300, train_loss[loss=2.85, ArTop10Accuracy=0.746, over 14568.00 frames. ], tot_loss[loss=2.787, ArTop10Accuracy=0.7551, over 9422.94 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,354 INFO [trainer.py:765] (2/8) Epoch 20, batch 400, train_loss[loss=2.774, ArTop10Accuracy=0.7555, over 10346.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.7553, over 10315.08 frames. ], batch size: 14, lr: 6.07e-03 +2024-08-06 06:32:03,758 INFO [trainer.py:765] (2/8) Epoch 20, batch 500, train_loss[loss=2.859, ArTop10Accuracy=0.7423, over 12227.00 frames. ], tot_loss[loss=2.785, ArTop10Accuracy=0.755, over 10888.33 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,356 INFO [trainer.py:765] (2/8) Epoch 20, batch 600, train_loss[loss=2.668, ArTop10Accuracy=0.7744, over 11616.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7532, over 11413.38 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,751 INFO [trainer.py:765] (2/8) Epoch 20, batch 700, train_loss[loss=2.82, ArTop10Accuracy=0.7457, over 10272.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.751, over 11570.07 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:43,830 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (2/8) Epoch 20, batch 800, train_loss[loss=2.598, ArTop10Accuracy=0.7936, over 10115.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7505, over 11678.62 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (2/8) Epoch 20, batch 900, train_loss[loss=2.811, ArTop10Accuracy=0.7549, over 13034.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7514, over 11749.17 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (2/8) Epoch 20, batch 1000, train_loss[loss=2.783, ArTop10Accuracy=0.7539, over 12951.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7507, over 11955.33 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (2/8) Epoch 20, batch 1100, train_loss[loss=2.809, ArTop10Accuracy=0.7529, over 13720.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7487, over 12010.35 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (2/8) Epoch 20, batch 1200, train_loss[loss=2.914, ArTop10Accuracy=0.7276, over 13237.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7483, over 11953.30 frames. ], batch size: 100, lr: 5.97e-03 +2024-08-06 06:36:42,597 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:36:42,600 INFO [trainer.py:1069] (2/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-3 b/libritts/log/log-train-2024-08-06-03-39-40-3 new file mode 100644 index 0000000000000000000000000000000000000000..c38eb0ee4363e48d80e0b38dd6c30daca2248312 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-3 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,351 INFO [trainer.py:870] (3/8) Training started +2024-08-06 03:39:40,352 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 03:39:40,352 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,352 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 03:39:41,135 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,929 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 03:39:43,993 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 03:39:43,995 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 03:39:43,997 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 03:39:43,997 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 03:39:43,997 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,600 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 03:39:44,601 INFO [datamodule.py:367] (3/8) About to create dev dataset +2024-08-06 03:39:44,925 INFO [datamodule.py:388] (3/8) About to create dev 
dataloader +2024-08-06 03:40:39,571 INFO [trainer.py:765] (3/8) Epoch 1, batch 100, train_loss[loss=4.117, ArTop10Accuracy=0.5062, over 14603.00 frames. ], tot_loss[loss=4.78, ArTop10Accuracy=0.3964, over 4781.19 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,923 INFO [trainer.py:765] (3/8) Epoch 1, batch 200, train_loss[loss=3.931, ArTop10Accuracy=0.5322, over 13756.00 frames. ], tot_loss[loss=4.301, ArTop10Accuracy=0.4761, over 7785.24 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,952 INFO [trainer.py:765] (3/8) Epoch 1, batch 300, train_loss[loss=3.707, ArTop10Accuracy=0.5742, over 14094.00 frames. ], tot_loss[loss=4.085, ArTop10Accuracy=0.5112, over 9421.78 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,081 INFO [trainer.py:765] (3/8) Epoch 1, batch 400, train_loss[loss=3.665, ArTop10Accuracy=0.5802, over 11425.00 frames. ], tot_loss[loss=3.935, ArTop10Accuracy=0.5362, over 10359.77 frames. ], batch size: 16, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (3/8) Epoch 1, batch 500, train_loss[loss=3.688, ArTop10Accuracy=0.5737, over 12449.00 frames. ], tot_loss[loss=3.824, ArTop10Accuracy=0.5544, over 10935.99 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,592 INFO [trainer.py:765] (3/8) Epoch 1, batch 600, train_loss[loss=3.448, ArTop10Accuracy=0.6227, over 11667.00 frames. ], tot_loss[loss=3.748, ArTop10Accuracy=0.5672, over 11463.43 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,899 INFO [trainer.py:765] (3/8) Epoch 1, batch 700, train_loss[loss=3.317, ArTop10Accuracy=0.6414, over 10103.00 frames. ], tot_loss[loss=3.684, ArTop10Accuracy=0.5787, over 11608.91 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (3/8) Epoch 1, batch 800, train_loss[loss=3.523, ArTop10Accuracy=0.6122, over 10016.00 frames. ], tot_loss[loss=3.635, ArTop10Accuracy=0.5878, over 11702.15 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,557 INFO [trainer.py:765] (3/8) Epoch 1, batch 900, train_loss[loss=3.604, ArTop10Accuracy=0.5968, over 12774.00 frames. ], tot_loss[loss=3.591, ArTop10Accuracy=0.5955, over 11758.18 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,648 INFO [trainer.py:765] (3/8) Epoch 1, batch 1000, train_loss[loss=3.45, ArTop10Accuracy=0.6238, over 12891.00 frames. ], tot_loss[loss=3.558, ArTop10Accuracy=0.6017, over 11949.02 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,612 INFO [trainer.py:765] (3/8) Epoch 1, batch 1100, train_loss[loss=3.4, ArTop10Accuracy=0.6345, over 13647.00 frames. ], tot_loss[loss=3.529, ArTop10Accuracy=0.6075, over 11997.67 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,744 INFO [trainer.py:765] (3/8) Epoch 1, batch 1200, train_loss[loss=3.464, ArTop10Accuracy=0.6213, over 12395.00 frames. ], tot_loss[loss=3.505, ArTop10Accuracy=0.6118, over 11950.16 frames. ], batch size: 98, lr: 2.96e-02 +2024-08-06 03:47:33,886 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 03:48:38,677 INFO [trainer.py:765] (3/8) Epoch 2, batch 100, train_loss[loss=3.395, ArTop10Accuracy=0.6306, over 14871.00 frames. ], tot_loss[loss=3.444, ArTop10Accuracy=0.6242, over 4775.70 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 03:49:14,597 INFO [trainer.py:765] (3/8) Epoch 2, batch 200, train_loss[loss=3.51, ArTop10Accuracy=0.6083, over 13599.00 frames. ], tot_loss[loss=3.437, ArTop10Accuracy=0.6255, over 7786.20 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,520 INFO [trainer.py:765] (3/8) Epoch 2, batch 300, train_loss[loss=3.402, ArTop10Accuracy=0.632, over 14250.00 frames. ], tot_loss[loss=3.423, ArTop10Accuracy=0.6278, over 9407.39 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:32,000 INFO [trainer.py:765] (3/8) Epoch 2, batch 400, train_loss[loss=3.288, ArTop10Accuracy=0.6533, over 10338.00 frames. ], tot_loss[loss=3.412, ArTop10Accuracy=0.6301, over 10318.90 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,110 INFO [trainer.py:765] (3/8) Epoch 2, batch 500, train_loss[loss=3.311, ArTop10Accuracy=0.6521, over 12407.00 frames. ], tot_loss[loss=3.401, ArTop10Accuracy=0.6318, over 10872.65 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,203 INFO [trainer.py:765] (3/8) Epoch 2, batch 600, train_loss[loss=3.302, ArTop10Accuracy=0.6409, over 11361.00 frames. ], tot_loss[loss=3.396, ArTop10Accuracy=0.6329, over 11397.19 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,994 INFO [trainer.py:765] (3/8) Epoch 2, batch 700, train_loss[loss=3.244, ArTop10Accuracy=0.6669, over 10124.00 frames. ], tot_loss[loss=3.389, ArTop10Accuracy=0.6344, over 11553.92 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,091 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (3/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29102MB +2024-08-06 03:52:56,542 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,882 INFO [trainer.py:765] (3/8) Epoch 2, batch 800, train_loss[loss=3.329, ArTop10Accuracy=0.6433, over 10016.00 frames. ], tot_loss[loss=3.385, ArTop10Accuracy=0.6351, over 11677.95 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (3/8) Epoch 2, batch 900, train_loss[loss=3.378, ArTop10Accuracy=0.635, over 12946.00 frames. ], tot_loss[loss=3.373, ArTop10Accuracy=0.6376, over 11718.66 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,809 INFO [trainer.py:765] (3/8) Epoch 2, batch 1000, train_loss[loss=3.299, ArTop10Accuracy=0.6502, over 13069.00 frames. ], tot_loss[loss=3.366, ArTop10Accuracy=0.6388, over 11934.56 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,007 INFO [trainer.py:765] (3/8) Epoch 2, batch 1100, train_loss[loss=3.333, ArTop10Accuracy=0.6416, over 13596.00 frames. ], tot_loss[loss=3.363, ArTop10Accuracy=0.6397, over 11988.81 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,228 INFO [trainer.py:765] (3/8) Epoch 2, batch 1200, train_loss[loss=3.439, ArTop10Accuracy=0.6277, over 12294.00 frames. ], tot_loss[loss=3.353, ArTop10Accuracy=0.6416, over 11932.91 frames. ], batch size: 97, lr: 2.80e-02 +2024-08-06 03:55:51,107 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 03:57:04,102 INFO [trainer.py:765] (3/8) Epoch 3, batch 100, train_loss[loss=3.394, ArTop10Accuracy=0.6345, over 14671.00 frames. ], tot_loss[loss=3.319, ArTop10Accuracy=0.6495, over 4772.81 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,980 INFO [trainer.py:765] (3/8) Epoch 3, batch 200, train_loss[loss=3.354, ArTop10Accuracy=0.6448, over 13549.00 frames. ], tot_loss[loss=3.291, ArTop10Accuracy=0.6545, over 7781.13 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,074 INFO [trainer.py:765] (3/8) Epoch 3, batch 300, train_loss[loss=3.155, ArTop10Accuracy=0.6776, over 14361.00 frames. ], tot_loss[loss=3.28, ArTop10Accuracy=0.6568, over 9430.23 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,254 INFO [trainer.py:765] (3/8) Epoch 3, batch 400, train_loss[loss=3.139, ArTop10Accuracy=0.6806, over 10285.00 frames. ], tot_loss[loss=3.261, ArTop10Accuracy=0.6599, over 10348.15 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,303 INFO [trainer.py:765] (3/8) Epoch 3, batch 500, train_loss[loss=3.219, ArTop10Accuracy=0.6659, over 12307.00 frames. ], tot_loss[loss=3.248, ArTop10Accuracy=0.6625, over 10894.10 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,095 INFO [trainer.py:765] (3/8) Epoch 3, batch 600, train_loss[loss=3.088, ArTop10Accuracy=0.6917, over 11554.00 frames. ], tot_loss[loss=3.239, ArTop10Accuracy=0.6638, over 11407.55 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,058 INFO [trainer.py:765] (3/8) Epoch 3, batch 700, train_loss[loss=3.011, ArTop10Accuracy=0.712, over 10154.00 frames. ], tot_loss[loss=3.231, ArTop10Accuracy=0.6651, over 11541.26 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,269 INFO [trainer.py:765] (3/8) Epoch 3, batch 800, train_loss[loss=3.31, ArTop10Accuracy=0.6535, over 9953.00 frames. ], tot_loss[loss=3.223, ArTop10Accuracy=0.6668, over 11683.25 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,741 INFO [trainer.py:765] (3/8) Epoch 3, batch 900, train_loss[loss=3.056, ArTop10Accuracy=0.6991, over 13210.00 frames. ], tot_loss[loss=3.202, ArTop10Accuracy=0.6712, over 11723.85 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,285 INFO [trainer.py:765] (3/8) Epoch 3, batch 1000, train_loss[loss=3.225, ArTop10Accuracy=0.6681, over 12919.00 frames. ], tot_loss[loss=3.197, ArTop10Accuracy=0.6725, over 11926.63 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (3/8) Epoch 3, batch 1100, train_loss[loss=3.108, ArTop10Accuracy=0.6862, over 13416.00 frames. ], tot_loss[loss=3.185, ArTop10Accuracy=0.6745, over 11991.79 frames. ], batch size: 33, lr: 2.55e-02 +2024-08-06 04:04:01,312 INFO [trainer.py:765] (3/8) Epoch 3, batch 1200, train_loss[loss=3.245, ArTop10Accuracy=0.6651, over 13034.00 frames. ], tot_loss[loss=3.182, ArTop10Accuracy=0.6755, over 11923.96 frames. ], batch size: 97, lr: 2.54e-02 +2024-08-06 04:04:26,890 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:05:43,368 INFO [trainer.py:765] (3/8) Epoch 4, batch 100, train_loss[loss=3.192, ArTop10Accuracy=0.6686, over 14625.00 frames. ], tot_loss[loss=3.133, ArTop10Accuracy=0.6861, over 4780.75 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,077 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,405 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29102MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,826 INFO [trainer.py:765] (3/8) Epoch 4, batch 200, train_loss[loss=3.115, ArTop10Accuracy=0.6888, over 13871.00 frames. ], tot_loss[loss=3.125, ArTop10Accuracy=0.6873, over 7774.58 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,545 INFO [trainer.py:765] (3/8) Epoch 4, batch 300, train_loss[loss=3.145, ArTop10Accuracy=0.6877, over 14535.00 frames. ], tot_loss[loss=3.11, ArTop10Accuracy=0.69, over 9414.29 frames. ], batch size: 45, lr: 2.36e-02 +2024-08-06 04:08:01,911 INFO [trainer.py:765] (3/8) Epoch 4, batch 400, train_loss[loss=2.906, ArTop10Accuracy=0.7286, over 10254.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6895, over 10316.87 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 04:08:45,345 INFO [trainer.py:765] (3/8) Epoch 4, batch 500, train_loss[loss=3.018, ArTop10Accuracy=0.7041, over 12270.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.6899, over 10894.30 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (3/8) Epoch 4, batch 600, train_loss[loss=3.075, ArTop10Accuracy=0.6964, over 11444.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6892, over 11425.80 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,502 INFO [trainer.py:765] (3/8) Epoch 4, batch 700, train_loss[loss=3.172, ArTop10Accuracy=0.677, over 10201.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6883, over 11575.73 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,960 INFO [trainer.py:765] (3/8) Epoch 4, batch 800, train_loss[loss=3.028, ArTop10Accuracy=0.7033, over 9970.00 frames. ], tot_loss[loss=3.118, ArTop10Accuracy=0.6882, over 11685.41 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,331 INFO [trainer.py:765] (3/8) Epoch 4, batch 900, train_loss[loss=3.04, ArTop10Accuracy=0.6989, over 13042.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6899, over 11745.06 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (3/8) Epoch 4, batch 1000, train_loss[loss=3.113, ArTop10Accuracy=0.689, over 12874.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.6901, over 11957.34 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (3/8) Epoch 4, batch 1100, train_loss[loss=3.1, ArTop10Accuracy=0.6923, over 13520.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.6896, over 12003.54 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,545 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,828 INFO [trainer.py:765] (3/8) Epoch 4, batch 1200, train_loss[loss=3.197, ArTop10Accuracy=0.6713, over 12150.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6898, over 11931.86 frames. ], batch size: 97, lr: 2.25e-02 +2024-08-06 04:13:24,264 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (3/8) Epoch 5, batch 100, train_loss[loss=3.076, ArTop10Accuracy=0.7003, over 14590.00 frames. ], tot_loss[loss=3.073, ArTop10Accuracy=0.6981, over 4780.89 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (3/8) Epoch 5, batch 200, train_loss[loss=3.105, ArTop10Accuracy=0.6934, over 13687.00 frames. ], tot_loss[loss=3.064, ArTop10Accuracy=0.6996, over 7787.69 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (3/8) Epoch 5, batch 300, train_loss[loss=3.108, ArTop10Accuracy=0.6873, over 14152.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7021, over 9416.94 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,134 INFO [trainer.py:765] (3/8) Epoch 5, batch 400, train_loss[loss=3.045, ArTop10Accuracy=0.702, over 10392.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7016, over 10331.86 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (3/8) Epoch 5, batch 500, train_loss[loss=3.079, ArTop10Accuracy=0.7032, over 12258.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7015, over 10900.58 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (3/8) Epoch 5, batch 600, train_loss[loss=2.986, ArTop10Accuracy=0.7162, over 11917.00 frames. ], tot_loss[loss=3.057, ArTop10Accuracy=0.7007, over 11418.43 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,033 INFO [trainer.py:765] (3/8) Epoch 5, batch 700, train_loss[loss=3.021, ArTop10Accuracy=0.7032, over 10015.00 frames. ], tot_loss[loss=3.063, ArTop10Accuracy=0.6994, over 11557.04 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (3/8) Epoch 5, batch 800, train_loss[loss=3.075, ArTop10Accuracy=0.7002, over 10101.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7, over 11657.55 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 04:20:27,476 INFO [trainer.py:811] (3/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 29103MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (3/8) Epoch 5, batch 900, train_loss[loss=3.144, ArTop10Accuracy=0.6824, over 12758.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7012, over 11710.26 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,307 INFO [trainer.py:765] (3/8) Epoch 5, batch 1000, train_loss[loss=3.006, ArTop10Accuracy=0.7101, over 13050.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.7009, over 11920.51 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,452 INFO [trainer.py:765] (3/8) Epoch 5, batch 1100, train_loss[loss=2.968, ArTop10Accuracy=0.7161, over 13843.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6999, over 11990.06 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (3/8) Epoch 5, batch 1200, train_loss[loss=3.215, ArTop10Accuracy=0.669, over 11392.00 frames. ], tot_loss[loss=3.064, ArTop10Accuracy=0.6994, over 11951.58 frames. ], batch size: 99, lr: 1.99e-02 +2024-08-06 04:22:29,987 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (3/8) Epoch 6, batch 100, train_loss[loss=3.134, ArTop10Accuracy=0.6869, over 14513.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7085, over 4789.93 frames. 
], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,255 INFO [trainer.py:765] (3/8) Epoch 6, batch 200, train_loss[loss=3.044, ArTop10Accuracy=0.7055, over 13840.00 frames. ], tot_loss[loss=3.016, ArTop10Accuracy=0.709, over 7807.33 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,677 INFO [trainer.py:765] (3/8) Epoch 6, batch 300, train_loss[loss=2.981, ArTop10Accuracy=0.7146, over 14359.00 frames. ], tot_loss[loss=3.012, ArTop10Accuracy=0.7098, over 9434.11 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (3/8) Epoch 6, batch 400, train_loss[loss=2.998, ArTop10Accuracy=0.7114, over 10950.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7092, over 10353.19 frames. ], batch size: 15, lr: 1.83e-02 +2024-08-06 04:26:51,485 INFO [trainer.py:765] (3/8) Epoch 6, batch 500, train_loss[loss=2.865, ArTop10Accuracy=0.7214, over 12318.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.7095, over 10916.04 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,297 INFO [trainer.py:765] (3/8) Epoch 6, batch 600, train_loss[loss=3.039, ArTop10Accuracy=0.7088, over 11495.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7081, over 11432.62 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,369 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,239 INFO [trainer.py:765] (3/8) Epoch 6, batch 700, train_loss[loss=2.872, ArTop10Accuracy=0.7345, over 9409.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7068, over 11580.74 frames. ], batch size: 11, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (3/8) Epoch 6, batch 800, train_loss[loss=2.957, ArTop10Accuracy=0.7191, over 10045.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7067, over 11695.78 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,750 INFO [trainer.py:765] (3/8) Epoch 6, batch 900, train_loss[loss=3.082, ArTop10Accuracy=0.7008, over 12930.00 frames. ], tot_loss[loss=3.016, ArTop10Accuracy=0.7082, over 11734.18 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,309 INFO [trainer.py:765] (3/8) Epoch 6, batch 1000, train_loss[loss=3.073, ArTop10Accuracy=0.7002, over 12994.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7075, over 11944.70 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,383 INFO [trainer.py:765] (3/8) Epoch 6, batch 1100, train_loss[loss=3.012, ArTop10Accuracy=0.7152, over 13668.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7063, over 11983.05 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (3/8) Epoch 6, batch 1200, train_loss[loss=3.158, ArTop10Accuracy=0.6804, over 12112.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.7066, over 11929.60 frames. ], batch size: 99, lr: 1.76e-02 +2024-08-06 04:31:40,712 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:32:52,405 INFO [trainer.py:765] (3/8) Epoch 7, batch 100, train_loss[loss=3.11, ArTop10Accuracy=0.6943, over 14759.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7143, over 4763.95 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,224 INFO [trainer.py:765] (3/8) Epoch 7, batch 200, train_loss[loss=2.918, ArTop10Accuracy=0.7272, over 13834.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7149, over 7783.14 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (3/8) Epoch 7, batch 300, train_loss[loss=2.993, ArTop10Accuracy=0.7134, over 14447.00 frames. ], tot_loss[loss=2.983, ArTop10Accuracy=0.7155, over 9417.10 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,847 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 04:34:45,808 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,809 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30085MB +2024-08-06 04:34:46,125 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,147 INFO [trainer.py:765] (3/8) Epoch 7, batch 400, train_loss[loss=2.748, ArTop10Accuracy=0.7597, over 11049.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.716, over 10326.75 frames. ], batch size: 15, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (3/8) Epoch 7, batch 500, train_loss[loss=2.9, ArTop10Accuracy=0.7251, over 12184.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.7157, over 10894.49 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (3/8) Epoch 7, batch 600, train_loss[loss=2.959, ArTop10Accuracy=0.7207, over 11711.00 frames. ], tot_loss[loss=2.986, ArTop10Accuracy=0.7145, over 11431.27 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (3/8) Epoch 7, batch 700, train_loss[loss=2.757, ArTop10Accuracy=0.7526, over 10147.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7138, over 11568.19 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,614 INFO [trainer.py:765] (3/8) Epoch 7, batch 800, train_loss[loss=2.937, ArTop10Accuracy=0.7287, over 10128.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7124, over 11693.65 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (3/8) Epoch 7, batch 900, train_loss[loss=2.95, ArTop10Accuracy=0.7118, over 12937.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7144, over 11744.62 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,575 INFO [trainer.py:765] (3/8) Epoch 7, batch 1000, train_loss[loss=2.964, ArTop10Accuracy=0.7254, over 12895.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7138, over 11956.71 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (3/8) Epoch 7, batch 1100, train_loss[loss=3.093, ArTop10Accuracy=0.6937, over 13650.00 frames. ], tot_loss[loss=2.999, ArTop10Accuracy=0.7118, over 12011.48 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,989 INFO [trainer.py:765] (3/8) Epoch 7, batch 1200, train_loss[loss=3.156, ArTop10Accuracy=0.6803, over 12005.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.7115, over 11969.64 frames. ], batch size: 98, lr: 1.57e-02 +2024-08-06 04:40:43,400 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,372 INFO [trainer.py:765] (3/8) Epoch 8, batch 100, train_loss[loss=2.982, ArTop10Accuracy=0.7194, over 14751.00 frames. ], tot_loss[loss=2.975, ArTop10Accuracy=0.7173, over 4764.96 frames. 
], batch size: 62, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (3/8) Epoch 8, batch 200, train_loss[loss=2.902, ArTop10Accuracy=0.7297, over 13499.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7192, over 7773.45 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (3/8) Epoch 8, batch 300, train_loss[loss=3.009, ArTop10Accuracy=0.7049, over 14179.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7201, over 9417.80 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,462 INFO [trainer.py:765] (3/8) Epoch 8, batch 400, train_loss[loss=2.755, ArTop10Accuracy=0.7546, over 10988.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7196, over 10319.75 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (3/8) Epoch 8, batch 500, train_loss[loss=2.881, ArTop10Accuracy=0.7376, over 12394.00 frames. ], tot_loss[loss=2.965, ArTop10Accuracy=0.7193, over 10884.71 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (3/8) Epoch 8, batch 600, train_loss[loss=2.813, ArTop10Accuracy=0.7425, over 11682.00 frames. ], tot_loss[loss=2.959, ArTop10Accuracy=0.7198, over 11409.66 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,038 INFO [trainer.py:765] (3/8) Epoch 8, batch 700, train_loss[loss=2.803, ArTop10Accuracy=0.7469, over 10222.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7185, over 11555.46 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,207 INFO [trainer.py:765] (3/8) Epoch 8, batch 800, train_loss[loss=3.033, ArTop10Accuracy=0.7028, over 10109.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7168, over 11674.50 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,606 INFO [trainer.py:765] (3/8) Epoch 8, batch 900, train_loss[loss=3.042, ArTop10Accuracy=0.7076, over 12873.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7181, over 11740.72 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,033 INFO [trainer.py:765] (3/8) Epoch 8, batch 1000, train_loss[loss=3.016, ArTop10Accuracy=0.7062, over 13180.00 frames. ], tot_loss[loss=2.976, ArTop10Accuracy=0.7167, over 11941.11 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,827 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (3/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 04:48:37,952 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,932 INFO [trainer.py:765] (3/8) Epoch 8, batch 1100, train_loss[loss=2.929, ArTop10Accuracy=0.724, over 13858.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.7162, over 11992.15 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (3/8) Epoch 8, batch 1200, train_loss[loss=3.093, ArTop10Accuracy=0.6968, over 12704.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7154, over 11928.99 frames. ], batch size: 97, lr: 1.40e-02 +2024-08-06 04:49:49,185 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:51:01,547 INFO [trainer.py:765] (3/8) Epoch 9, batch 100, train_loss[loss=2.996, ArTop10Accuracy=0.7164, over 14378.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7243, over 4788.87 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (3/8) Epoch 9, batch 200, train_loss[loss=2.86, ArTop10Accuracy=0.7419, over 13414.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7248, over 7783.26 frames. ], batch size: 33, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (3/8) Epoch 9, batch 300, train_loss[loss=2.961, ArTop10Accuracy=0.7236, over 14211.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7254, over 9404.33 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (3/8) Epoch 9, batch 400, train_loss[loss=2.781, ArTop10Accuracy=0.7493, over 10467.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7245, over 10320.64 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (3/8) Epoch 9, batch 500, train_loss[loss=2.909, ArTop10Accuracy=0.731, over 12267.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7246, over 10887.58 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (3/8) Epoch 9, batch 600, train_loss[loss=2.976, ArTop10Accuracy=0.7208, over 11579.00 frames. ], tot_loss[loss=2.942, ArTop10Accuracy=0.7235, over 11417.08 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (3/8) Epoch 9, batch 700, train_loss[loss=2.871, ArTop10Accuracy=0.7364, over 10355.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7226, over 11568.10 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,574 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,597 INFO [trainer.py:765] (3/8) Epoch 9, batch 800, train_loss[loss=2.893, ArTop10Accuracy=0.739, over 10168.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7224, over 11689.06 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (3/8) Epoch 9, batch 900, train_loss[loss=2.95, ArTop10Accuracy=0.7181, over 13147.00 frames. ], tot_loss[loss=2.943, ArTop10Accuracy=0.7232, over 11730.66 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (3/8) Epoch 9, batch 1000, train_loss[loss=2.925, ArTop10Accuracy=0.7328, over 13048.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7219, over 11923.21 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,657 INFO [trainer.py:765] (3/8) Epoch 9, batch 1100, train_loss[loss=3.045, ArTop10Accuracy=0.7019, over 13823.00 frames. ], tot_loss[loss=2.957, ArTop10Accuracy=0.7207, over 11994.80 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,094 INFO [trainer.py:765] (3/8) Epoch 9, batch 1200, train_loss[loss=3.133, ArTop10Accuracy=0.6838, over 11762.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.7201, over 11950.46 frames. ], batch size: 97, lr: 1.27e-02 +2024-08-06 04:58:43,862 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 04:59:52,749 INFO [trainer.py:765] (3/8) Epoch 10, batch 100, train_loss[loss=3.068, ArTop10Accuracy=0.7035, over 14397.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7286, over 4773.20 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (3/8) Epoch 10, batch 200, train_loss[loss=2.908, ArTop10Accuracy=0.7333, over 13703.00 frames. ], tot_loss[loss=2.91, ArTop10Accuracy=0.7306, over 7777.35 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (3/8) Epoch 10, batch 300, train_loss[loss=2.942, ArTop10Accuracy=0.7222, over 14712.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7293, over 9414.49 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (3/8) Epoch 10, batch 400, train_loss[loss=2.891, ArTop10Accuracy=0.7271, over 11065.00 frames. ], tot_loss[loss=2.915, ArTop10Accuracy=0.7291, over 10336.05 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,488 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 05:02:55,377 INFO [trainer.py:811] (3/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (3/8) Epoch 10, batch 500, train_loss[loss=2.782, ArTop10Accuracy=0.7507, over 12150.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7283, over 10888.26 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (3/8) Epoch 10, batch 600, train_loss[loss=2.876, ArTop10Accuracy=0.7369, over 11586.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7278, over 11427.44 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (3/8) Epoch 10, batch 700, train_loss[loss=2.762, ArTop10Accuracy=0.7561, over 10193.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7261, over 11556.07 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (3/8) Epoch 10, batch 800, train_loss[loss=2.778, ArTop10Accuracy=0.7504, over 10168.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7249, over 11692.13 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,246 INFO [trainer.py:765] (3/8) Epoch 10, batch 900, train_loss[loss=2.973, ArTop10Accuracy=0.7209, over 12981.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7262, over 11737.10 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,844 INFO [trainer.py:765] (3/8) Epoch 10, batch 1000, train_loss[loss=2.826, ArTop10Accuracy=0.7463, over 13166.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.7256, over 11941.25 frames. ], batch size: 28, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (3/8) Epoch 10, batch 1100, train_loss[loss=2.952, ArTop10Accuracy=0.7296, over 13581.00 frames. ], tot_loss[loss=2.944, ArTop10Accuracy=0.7231, over 11996.05 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,484 INFO [trainer.py:765] (3/8) Epoch 10, batch 1200, train_loss[loss=3.129, ArTop10Accuracy=0.6903, over 12176.00 frames. ], tot_loss[loss=2.941, ArTop10Accuracy=0.7237, over 11945.79 frames. ], batch size: 100, lr: 1.16e-02 +2024-08-06 05:07:40,752 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:08:52,966 INFO [trainer.py:765] (3/8) Epoch 11, batch 100, train_loss[loss=2.987, ArTop10Accuracy=0.7171, over 14663.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7324, over 4790.15 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,278 INFO [trainer.py:765] (3/8) Epoch 11, batch 200, train_loss[loss=2.819, ArTop10Accuracy=0.7454, over 13803.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7321, over 7780.78 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,721 INFO [trainer.py:765] (3/8) Epoch 11, batch 300, train_loss[loss=2.969, ArTop10Accuracy=0.7203, over 14061.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7327, over 9405.12 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (3/8) Epoch 11, batch 400, train_loss[loss=2.935, ArTop10Accuracy=0.7264, over 11029.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7318, over 10325.61 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (3/8) Epoch 11, batch 500, train_loss[loss=2.863, ArTop10Accuracy=0.7375, over 12132.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7325, over 10885.67 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,288 INFO [trainer.py:765] (3/8) Epoch 11, batch 600, train_loss[loss=2.888, ArTop10Accuracy=0.7428, over 11622.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7313, over 11422.12 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,709 INFO [trainer.py:765] (3/8) Epoch 11, batch 700, train_loss[loss=2.757, ArTop10Accuracy=0.7625, over 9981.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.73, over 11549.60 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (3/8) Epoch 11, batch 800, train_loss[loss=2.749, ArTop10Accuracy=0.7563, over 9895.00 frames. ], tot_loss[loss=2.915, ArTop10Accuracy=0.7289, over 11677.25 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,668 INFO [trainer.py:765] (3/8) Epoch 11, batch 900, train_loss[loss=2.888, ArTop10Accuracy=0.7276, over 13437.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7295, over 11732.93 frames. ], batch size: 28, lr: 1.07e-02 +2024-08-06 05:15:07,264 INFO [trainer.py:765] (3/8) Epoch 11, batch 1000, train_loss[loss=2.835, ArTop10Accuracy=0.752, over 12836.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7285, over 11944.18 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,260 INFO [trainer.py:765] (3/8) Epoch 11, batch 1100, train_loss[loss=2.952, ArTop10Accuracy=0.7164, over 13609.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7278, over 12003.06 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,498 INFO [trainer.py:765] (3/8) Epoch 11, batch 1200, train_loss[loss=3.067, ArTop10Accuracy=0.7008, over 11546.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7275, over 11964.18 frames. ], batch size: 98, lr: 1.06e-02 +2024-08-06 05:16:12,698 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 05:16:21,622 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,650 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:18:03,005 INFO [trainer.py:765] (3/8) Epoch 12, batch 100, train_loss[loss=2.964, ArTop10Accuracy=0.7212, over 14866.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7322, over 4790.49 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,004 INFO [trainer.py:765] (3/8) Epoch 12, batch 200, train_loss[loss=2.929, ArTop10Accuracy=0.7225, over 13722.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7344, over 7793.37 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (3/8) Epoch 12, batch 300, train_loss[loss=2.982, ArTop10Accuracy=0.7113, over 14227.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.735, over 9417.88 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (3/8) Epoch 12, batch 400, train_loss[loss=2.809, ArTop10Accuracy=0.7511, over 10378.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7344, over 10304.41 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (3/8) Epoch 12, batch 500, train_loss[loss=2.826, ArTop10Accuracy=0.7483, over 12250.00 frames. ], tot_loss[loss=2.886, ArTop10Accuracy=0.7348, over 10886.81 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,915 INFO [trainer.py:765] (3/8) Epoch 12, batch 600, train_loss[loss=2.973, ArTop10Accuracy=0.7218, over 11742.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7346, over 11436.96 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,206 INFO [trainer.py:765] (3/8) Epoch 12, batch 700, train_loss[loss=2.776, ArTop10Accuracy=0.7524, over 9429.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7315, over 11563.20 frames. ], batch size: 11, lr: 9.93e-03 +2024-08-06 05:23:08,912 INFO [trainer.py:765] (3/8) Epoch 12, batch 800, train_loss[loss=2.685, ArTop10Accuracy=0.7814, over 9921.00 frames. ], tot_loss[loss=2.907, ArTop10Accuracy=0.7306, over 11682.57 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (3/8) Epoch 12, batch 900, train_loss[loss=2.961, ArTop10Accuracy=0.7262, over 13217.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7323, over 11724.68 frames. ], batch size: 28, lr: 9.87e-03 +2024-08-06 05:23:54,576 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,345 INFO [trainer.py:765] (3/8) Epoch 12, batch 1000, train_loss[loss=2.969, ArTop10Accuracy=0.7143, over 13062.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7311, over 11924.71 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,501 INFO [trainer.py:765] (3/8) Epoch 12, batch 1100, train_loss[loss=2.863, ArTop10Accuracy=0.739, over 13803.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7298, over 11984.26 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (3/8) Epoch 12, batch 1200, train_loss[loss=3.127, ArTop10Accuracy=0.6863, over 12289.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7305, over 11958.42 frames. ], batch size: 97, lr: 9.78e-03 +2024-08-06 05:25:41,445 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:26:46,786 INFO [trainer.py:765] (3/8) Epoch 13, batch 100, train_loss[loss=2.989, ArTop10Accuracy=0.7167, over 14543.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7364, over 4787.01 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,552 INFO [trainer.py:765] (3/8) Epoch 13, batch 200, train_loss[loss=2.875, ArTop10Accuracy=0.7364, over 13686.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7383, over 7796.13 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (3/8) Epoch 13, batch 300, train_loss[loss=2.916, ArTop10Accuracy=0.7318, over 14690.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7386, over 9411.32 frames. ], batch size: 45, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (3/8) Epoch 13, batch 400, train_loss[loss=2.769, ArTop10Accuracy=0.7523, over 10141.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7381, over 10320.28 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,966 INFO [trainer.py:765] (3/8) Epoch 13, batch 500, train_loss[loss=2.888, ArTop10Accuracy=0.7315, over 12288.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7381, over 10895.62 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,248 INFO [trainer.py:765] (3/8) Epoch 13, batch 600, train_loss[loss=2.769, ArTop10Accuracy=0.758, over 11485.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.7369, over 11421.10 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (3/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 05:31:07,352 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (3/8) Epoch 13, batch 700, train_loss[loss=2.657, ArTop10Accuracy=0.7735, over 10344.00 frames. ], tot_loss[loss=2.883, ArTop10Accuracy=0.7355, over 11581.38 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (3/8) Epoch 13, batch 800, train_loss[loss=2.896, ArTop10Accuracy=0.7337, over 9330.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7343, over 11692.44 frames. ], batch size: 11, lr: 9.18e-03 +2024-08-06 05:32:31,521 INFO [trainer.py:765] (3/8) Epoch 13, batch 900, train_loss[loss=2.845, ArTop10Accuracy=0.7379, over 12798.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7352, over 11731.31 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (3/8) Epoch 13, batch 1000, train_loss[loss=2.795, ArTop10Accuracy=0.7537, over 13511.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7347, over 11970.04 frames. ], batch size: 28, lr: 9.13e-03 +2024-08-06 05:33:34,232 INFO [trainer.py:765] (3/8) Epoch 13, batch 1100, train_loss[loss=2.878, ArTop10Accuracy=0.7385, over 13451.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7332, over 12005.44 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (3/8) Epoch 13, batch 1200, train_loss[loss=2.985, ArTop10Accuracy=0.717, over 11371.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7334, over 11933.83 frames. ], batch size: 97, lr: 9.07e-03 +2024-08-06 05:34:29,426 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:35:39,197 INFO [trainer.py:765] (3/8) Epoch 14, batch 100, train_loss[loss=2.94, ArTop10Accuracy=0.7238, over 14458.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7382, over 4779.97 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,062 INFO [trainer.py:765] (3/8) Epoch 14, batch 200, train_loss[loss=2.737, ArTop10Accuracy=0.7649, over 13747.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7403, over 7786.32 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (3/8) Epoch 14, batch 300, train_loss[loss=2.941, ArTop10Accuracy=0.7261, over 14237.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7402, over 9429.64 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,029 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,138 INFO [trainer.py:765] (3/8) Epoch 14, batch 400, train_loss[loss=2.747, ArTop10Accuracy=0.7601, over 10251.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7402, over 10338.19 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (3/8) Epoch 14, batch 500, train_loss[loss=2.865, ArTop10Accuracy=0.7422, over 12503.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.741, over 10906.36 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,374 INFO [trainer.py:765] (3/8) Epoch 14, batch 600, train_loss[loss=2.679, ArTop10Accuracy=0.7766, over 11609.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7396, over 11430.57 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (3/8) Epoch 14, batch 700, train_loss[loss=2.685, ArTop10Accuracy=0.7764, over 9983.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7379, over 11562.83 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,135 INFO [trainer.py:765] (3/8) Epoch 14, batch 800, train_loss[loss=2.703, ArTop10Accuracy=0.7643, over 9558.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7368, over 11693.89 frames. ], batch size: 11, lr: 8.55e-03 +2024-08-06 05:41:20,466 INFO [trainer.py:765] (3/8) Epoch 14, batch 900, train_loss[loss=2.95, ArTop10Accuracy=0.7291, over 12944.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7378, over 11742.33 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,995 INFO [trainer.py:765] (3/8) Epoch 14, batch 1000, train_loss[loss=2.798, ArTop10Accuracy=0.7499, over 13064.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.7368, over 11957.67 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,216 INFO [trainer.py:765] (3/8) Epoch 14, batch 1100, train_loss[loss=2.904, ArTop10Accuracy=0.7317, over 13759.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.736, over 11997.58 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,549 INFO [trainer.py:765] (3/8) Epoch 14, batch 1200, train_loss[loss=3.046, ArTop10Accuracy=0.7044, over 12556.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7369, over 11948.52 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:18,281 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:44:28,572 INFO [trainer.py:765] (3/8) Epoch 15, batch 100, train_loss[loss=2.977, ArTop10Accuracy=0.7179, over 14227.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7442, over 4767.26 frames. ], batch size: 61, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 05:44:38,024 INFO [trainer.py:811] (3/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,184 INFO [trainer.py:765] (3/8) Epoch 15, batch 200, train_loss[loss=2.813, ArTop10Accuracy=0.7544, over 13737.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7435, over 7794.24 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (3/8) Epoch 15, batch 300, train_loss[loss=2.9, ArTop10Accuracy=0.735, over 14329.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7437, over 9421.04 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (3/8) Epoch 15, batch 400, train_loss[loss=2.634, ArTop10Accuracy=0.7808, over 10879.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7444, over 10334.43 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (3/8) Epoch 15, batch 500, train_loss[loss=2.871, ArTop10Accuracy=0.7425, over 12345.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7444, over 10905.88 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (3/8) Epoch 15, batch 600, train_loss[loss=2.909, ArTop10Accuracy=0.7305, over 11627.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7429, over 11433.74 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,855 INFO [trainer.py:765] (3/8) Epoch 15, batch 700, train_loss[loss=2.691, ArTop10Accuracy=0.7771, over 10169.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7408, over 11583.30 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,779 INFO [trainer.py:765] (3/8) Epoch 15, batch 800, train_loss[loss=2.641, ArTop10Accuracy=0.7781, over 10191.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7402, over 11695.52 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 05:50:17,211 INFO [trainer.py:765] (3/8) Epoch 15, batch 900, train_loss[loss=2.804, ArTop10Accuracy=0.7574, over 13073.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7409, over 11740.40 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (3/8) Epoch 15, batch 1000, train_loss[loss=2.959, ArTop10Accuracy=0.7232, over 12954.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7406, over 11944.15 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (3/8) Epoch 15, batch 1100, train_loss[loss=2.798, ArTop10Accuracy=0.7486, over 13701.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.739, over 12012.04 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,514 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (3/8) Epoch 15, batch 1200, train_loss[loss=3.047, ArTop10Accuracy=0.7004, over 12045.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7376, over 11949.03 frames. ], batch size: 97, lr: 7.91e-03 +2024-08-06 05:52:17,932 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (3/8) Epoch 16, batch 100, train_loss[loss=2.916, ArTop10Accuracy=0.7281, over 14698.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7452, over 4799.46 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,877 INFO [trainer.py:765] (3/8) Epoch 16, batch 200, train_loss[loss=2.853, ArTop10Accuracy=0.7472, over 13485.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7461, over 7786.33 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (3/8) Epoch 16, batch 300, train_loss[loss=2.888, ArTop10Accuracy=0.737, over 14288.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7469, over 9407.78 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,930 INFO [trainer.py:765] (3/8) Epoch 16, batch 400, train_loss[loss=2.797, ArTop10Accuracy=0.7545, over 10839.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7464, over 10331.18 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (3/8) Epoch 16, batch 500, train_loss[loss=2.874, ArTop10Accuracy=0.7351, over 12220.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7456, over 10904.21 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,440 INFO [trainer.py:765] (3/8) Epoch 16, batch 600, train_loss[loss=2.868, ArTop10Accuracy=0.7482, over 11640.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7446, over 11436.05 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (3/8) Epoch 16, batch 700, train_loss[loss=2.699, ArTop10Accuracy=0.7755, over 10212.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7434, over 11586.61 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (3/8) Epoch 16, batch 800, train_loss[loss=2.708, ArTop10Accuracy=0.7646, over 10161.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7422, over 11686.02 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,569 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (3/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (3/8) Epoch 16, batch 900, train_loss[loss=2.795, ArTop10Accuracy=0.7538, over 12753.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7443, over 11743.08 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (3/8) Epoch 16, batch 1000, train_loss[loss=2.907, ArTop10Accuracy=0.7306, over 12969.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7429, over 11945.26 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,092 INFO [trainer.py:765] (3/8) Epoch 16, batch 1100, train_loss[loss=2.864, ArTop10Accuracy=0.733, over 13703.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7413, over 12008.06 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,464 INFO [trainer.py:765] (3/8) Epoch 16, batch 1200, train_loss[loss=3.025, ArTop10Accuracy=0.7084, over 11980.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7413, over 11947.03 frames. ], batch size: 99, lr: 7.43e-03 +2024-08-06 06:01:12,435 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:02:27,261 INFO [trainer.py:765] (3/8) Epoch 17, batch 100, train_loss[loss=2.848, ArTop10Accuracy=0.7414, over 14652.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7473, over 4787.17 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,851 INFO [trainer.py:765] (3/8) Epoch 17, batch 200, train_loss[loss=2.864, ArTop10Accuracy=0.7452, over 13776.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7486, over 7795.02 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (3/8) Epoch 17, batch 300, train_loss[loss=2.958, ArTop10Accuracy=0.7282, over 14039.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.749, over 9412.62 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,838 INFO [trainer.py:765] (3/8) Epoch 17, batch 400, train_loss[loss=2.749, ArTop10Accuracy=0.7572, over 11132.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7485, over 10324.72 frames. ], batch size: 15, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (3/8) Epoch 17, batch 500, train_loss[loss=2.766, ArTop10Accuracy=0.7568, over 12265.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7484, over 10905.10 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,550 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (3/8) Epoch 17, batch 600, train_loss[loss=2.671, ArTop10Accuracy=0.7666, over 11525.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.747, over 11433.66 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,694 INFO [trainer.py:765] (3/8) Epoch 17, batch 700, train_loss[loss=3.011, ArTop10Accuracy=0.7116, over 10163.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7454, over 11583.87 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (3/8) Epoch 17, batch 800, train_loss[loss=2.79, ArTop10Accuracy=0.7645, over 10094.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7446, over 11689.66 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,384 INFO [trainer.py:765] (3/8) Epoch 17, batch 900, train_loss[loss=2.88, ArTop10Accuracy=0.7357, over 13394.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7451, over 11745.90 frames. ], batch size: 28, lr: 7.05e-03 +2024-08-06 06:08:47,995 INFO [trainer.py:765] (3/8) Epoch 17, batch 1000, train_loss[loss=2.822, ArTop10Accuracy=0.7473, over 13019.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7447, over 11956.48 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (3/8) Epoch 17, batch 1100, train_loss[loss=2.841, ArTop10Accuracy=0.7501, over 13726.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7432, over 12000.29 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,445 INFO [trainer.py:765] (3/8) Epoch 17, batch 1200, train_loss[loss=3.041, ArTop10Accuracy=0.7052, over 12696.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7428, over 11949.34 frames. ], batch size: 98, lr: 7.01e-03 +2024-08-06 06:10:15,279 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:11:23,102 INFO [trainer.py:765] (3/8) Epoch 18, batch 100, train_loss[loss=2.884, ArTop10Accuracy=0.7367, over 14602.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7485, over 4781.54 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (3/8) Epoch 18, batch 200, train_loss[loss=2.833, ArTop10Accuracy=0.7475, over 13953.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7495, over 7779.45 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,318 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (3/8) Epoch 18, batch 300, train_loss[loss=2.817, ArTop10Accuracy=0.7439, over 14188.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7492, over 9424.29 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,098 INFO [trainer.py:765] (3/8) Epoch 18, batch 400, train_loss[loss=2.667, ArTop10Accuracy=0.7695, over 10271.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7502, over 10332.75 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (3/8) Epoch 18, batch 500, train_loss[loss=2.87, ArTop10Accuracy=0.7359, over 12236.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7506, over 10902.22 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (3/8) Epoch 18, batch 600, train_loss[loss=2.729, ArTop10Accuracy=0.7691, over 11414.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.749, over 11441.89 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,343 INFO [trainer.py:765] (3/8) Epoch 18, batch 700, train_loss[loss=2.832, ArTop10Accuracy=0.7482, over 10185.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7478, over 11592.47 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (3/8) Epoch 18, batch 800, train_loss[loss=2.795, ArTop10Accuracy=0.7506, over 9998.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7468, over 11699.45 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,913 INFO [trainer.py:765] (3/8) Epoch 18, batch 900, train_loss[loss=2.774, ArTop10Accuracy=0.7573, over 12972.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7484, over 11740.80 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,528 INFO [trainer.py:765] (3/8) Epoch 18, batch 1000, train_loss[loss=2.823, ArTop10Accuracy=0.7471, over 12875.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7472, over 11949.16 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,662 INFO [trainer.py:765] (3/8) Epoch 18, batch 1100, train_loss[loss=2.831, ArTop10Accuracy=0.7466, over 13939.00 frames. ], tot_loss[loss=2.832, ArTop10Accuracy=0.7456, over 11998.44 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (3/8) Epoch 18, batch 1200, train_loss[loss=3.016, ArTop10Accuracy=0.7114, over 11863.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7444, over 11921.75 frames. ], batch size: 97, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,738 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:20:29,728 INFO [trainer.py:765] (3/8) Epoch 19, batch 100, train_loss[loss=2.978, ArTop10Accuracy=0.7147, over 14768.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7493, over 4781.30 frames. 
], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,275 INFO [trainer.py:765] (3/8) Epoch 19, batch 200, train_loss[loss=2.813, ArTop10Accuracy=0.7508, over 13606.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7514, over 7788.65 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 06:21:56,078 INFO [trainer.py:765] (3/8) Epoch 19, batch 300, train_loss[loss=2.879, ArTop10Accuracy=0.7366, over 14388.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.752, over 9421.56 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (3/8) Epoch 19, batch 400, train_loss[loss=2.753, ArTop10Accuracy=0.7586, over 10932.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7525, over 10334.52 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 06:23:18,997 INFO [trainer.py:765] (3/8) Epoch 19, batch 500, train_loss[loss=2.858, ArTop10Accuracy=0.7384, over 12377.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7524, over 10910.09 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (3/8) Epoch 19, batch 600, train_loss[loss=2.738, ArTop10Accuracy=0.7515, over 11531.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7507, over 11440.50 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,185 INFO [trainer.py:765] (3/8) Epoch 19, batch 700, train_loss[loss=2.679, ArTop10Accuracy=0.7535, over 10299.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7496, over 11585.58 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,354 INFO [trainer.py:765] (3/8) Epoch 19, batch 800, train_loss[loss=2.695, ArTop10Accuracy=0.7651, over 10144.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7487, over 11699.81 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,624 INFO [trainer.py:765] (3/8) Epoch 19, batch 900, train_loss[loss=2.836, ArTop10Accuracy=0.7465, over 13177.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7504, over 11753.51 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,772 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (3/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 33253MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,030 INFO [trainer.py:765] (3/8) Epoch 19, batch 1000, train_loss[loss=2.779, ArTop10Accuracy=0.7549, over 13053.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.749, over 11956.61 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (3/8) Epoch 19, batch 1100, train_loss[loss=2.841, ArTop10Accuracy=0.7443, over 13080.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7477, over 12021.57 frames. ], batch size: 33, lr: 6.30e-03 +2024-08-06 06:27:35,454 INFO [trainer.py:765] (3/8) Epoch 19, batch 1200, train_loss[loss=3.016, ArTop10Accuracy=0.7059, over 12359.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7466, over 11984.14 frames. ], batch size: 98, lr: 6.28e-03 +2024-08-06 06:28:00,542 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:29:08,985 INFO [trainer.py:765] (3/8) Epoch 20, batch 100, train_loss[loss=2.843, ArTop10Accuracy=0.7485, over 14413.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7546, over 4784.57 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (3/8) Epoch 20, batch 200, train_loss[loss=2.706, ArTop10Accuracy=0.7677, over 13708.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7532, over 7794.67 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,106 INFO [trainer.py:765] (3/8) Epoch 20, batch 300, train_loss[loss=2.879, ArTop10Accuracy=0.7408, over 14328.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7551, over 9427.72 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,353 INFO [trainer.py:765] (3/8) Epoch 20, batch 400, train_loss[loss=2.674, ArTop10Accuracy=0.7746, over 10901.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7537, over 10349.50 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (3/8) Epoch 20, batch 500, train_loss[loss=2.876, ArTop10Accuracy=0.7405, over 12212.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7548, over 10908.21 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,357 INFO [trainer.py:765] (3/8) Epoch 20, batch 600, train_loss[loss=2.787, ArTop10Accuracy=0.7558, over 11556.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7538, over 11419.59 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,751 INFO [trainer.py:765] (3/8) Epoch 20, batch 700, train_loss[loss=2.836, ArTop10Accuracy=0.7457, over 10159.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7512, over 11550.65 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:43,829 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (3/8) Epoch 20, batch 800, train_loss[loss=2.645, ArTop10Accuracy=0.7788, over 9362.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7507, over 11680.25 frames. ], batch size: 11, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (3/8) Epoch 20, batch 900, train_loss[loss=2.822, ArTop10Accuracy=0.7504, over 12957.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7519, over 11729.13 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (3/8) Epoch 20, batch 1000, train_loss[loss=2.759, ArTop10Accuracy=0.7615, over 13110.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7506, over 11943.83 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (3/8) Epoch 20, batch 1100, train_loss[loss=2.798, ArTop10Accuracy=0.7566, over 13668.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7492, over 12000.36 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (3/8) Epoch 20, batch 1200, train_loss[loss=2.98, ArTop10Accuracy=0.717, over 12341.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7489, over 11954.84 frames. ], batch size: 98, lr: 5.97e-03 +2024-08-06 06:36:42,271 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:36:42,274 INFO [trainer.py:1069] (3/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-4 b/libritts/log/log-train-2024-08-06-03-39-40-4 new file mode 100644 index 0000000000000000000000000000000000000000..9e694a9c6503a52976245d77882c8f9a4085169b --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-4 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,358 INFO [trainer.py:870] (4/8) Training started +2024-08-06 03:39:40,359 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 03:39:40,359 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,359 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 03:39:41,113 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,925 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 03:39:44,001 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 03:39:44,003 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 03:39:44,004 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 03:39:44,004 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 03:39:44,005 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,612 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 03:39:44,613 INFO [datamodule.py:367] (4/8) About to create dev dataset +2024-08-06 03:39:44,939 INFO [datamodule.py:388] (4/8) About to create dev 
dataloader +2024-08-06 03:40:39,570 INFO [trainer.py:765] (4/8) Epoch 1, batch 100, train_loss[loss=4.225, ArTop10Accuracy=0.4938, over 14414.00 frames. ], tot_loss[loss=4.777, ArTop10Accuracy=0.3971, over 4774.84 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,922 INFO [trainer.py:765] (4/8) Epoch 1, batch 200, train_loss[loss=4.064, ArTop10Accuracy=0.5143, over 13538.00 frames. ], tot_loss[loss=4.301, ArTop10Accuracy=0.4758, over 7793.16 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,951 INFO [trainer.py:765] (4/8) Epoch 1, batch 300, train_loss[loss=3.831, ArTop10Accuracy=0.5496, over 14188.00 frames. ], tot_loss[loss=4.09, ArTop10Accuracy=0.5102, over 9413.38 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,081 INFO [trainer.py:765] (4/8) Epoch 1, batch 400, train_loss[loss=3.819, ArTop10Accuracy=0.563, over 10365.00 frames. ], tot_loss[loss=3.937, ArTop10Accuracy=0.5359, over 10319.93 frames. ], batch size: 14, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (4/8) Epoch 1, batch 500, train_loss[loss=3.657, ArTop10Accuracy=0.5825, over 12296.00 frames. ], tot_loss[loss=3.826, ArTop10Accuracy=0.5542, over 10892.01 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,593 INFO [trainer.py:765] (4/8) Epoch 1, batch 600, train_loss[loss=3.663, ArTop10Accuracy=0.5864, over 11634.00 frames. ], tot_loss[loss=3.754, ArTop10Accuracy=0.5663, over 11436.17 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,900 INFO [trainer.py:765] (4/8) Epoch 1, batch 700, train_loss[loss=3.492, ArTop10Accuracy=0.6121, over 10023.00 frames. ], tot_loss[loss=3.693, ArTop10Accuracy=0.5771, over 11573.93 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (4/8) Epoch 1, batch 800, train_loss[loss=3.712, ArTop10Accuracy=0.5827, over 9355.00 frames. ], tot_loss[loss=3.637, ArTop10Accuracy=0.5873, over 11694.16 frames. ], batch size: 11, lr: 2.98e-02 +2024-08-06 03:45:32,557 INFO [trainer.py:765] (4/8) Epoch 1, batch 900, train_loss[loss=3.519, ArTop10Accuracy=0.6113, over 13171.00 frames. ], tot_loss[loss=3.589, ArTop10Accuracy=0.5962, over 11717.35 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,648 INFO [trainer.py:765] (4/8) Epoch 1, batch 1000, train_loss[loss=3.519, ArTop10Accuracy=0.6115, over 12835.00 frames. ], tot_loss[loss=3.562, ArTop10Accuracy=0.6014, over 11909.64 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,611 INFO [trainer.py:765] (4/8) Epoch 1, batch 1100, train_loss[loss=3.479, ArTop10Accuracy=0.6132, over 13681.00 frames. ], tot_loss[loss=3.528, ArTop10Accuracy=0.6072, over 11984.37 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,744 INFO [trainer.py:765] (4/8) Epoch 1, batch 1200, train_loss[loss=3.55, ArTop10Accuracy=0.6048, over 12509.00 frames. ], tot_loss[loss=3.504, ArTop10Accuracy=0.612, over 11929.41 frames. ], batch size: 97, lr: 2.96e-02 +2024-08-06 03:47:33,742 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 03:48:38,677 INFO [trainer.py:765] (4/8) Epoch 2, batch 100, train_loss[loss=3.509, ArTop10Accuracy=0.6106, over 14813.00 frames. ], tot_loss[loss=3.456, ArTop10Accuracy=0.6216, over 4802.32 frames. 
], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,597 INFO [trainer.py:765] (4/8) Epoch 2, batch 200, train_loss[loss=3.359, ArTop10Accuracy=0.6391, over 13732.00 frames. ], tot_loss[loss=3.439, ArTop10Accuracy=0.625, over 7801.46 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,520 INFO [trainer.py:765] (4/8) Epoch 2, batch 300, train_loss[loss=3.459, ArTop10Accuracy=0.6197, over 14142.00 frames. ], tot_loss[loss=3.424, ArTop10Accuracy=0.6276, over 9418.44 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:32,000 INFO [trainer.py:765] (4/8) Epoch 2, batch 400, train_loss[loss=3.317, ArTop10Accuracy=0.6446, over 10925.00 frames. ], tot_loss[loss=3.412, ArTop10Accuracy=0.6301, over 10325.62 frames. ], batch size: 15, lr: 2.88e-02 +2024-08-06 03:51:17,110 INFO [trainer.py:765] (4/8) Epoch 2, batch 500, train_loss[loss=3.511, ArTop10Accuracy=0.6194, over 12380.00 frames. ], tot_loss[loss=3.404, ArTop10Accuracy=0.6313, over 10904.21 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,203 INFO [trainer.py:765] (4/8) Epoch 2, batch 600, train_loss[loss=3.427, ArTop10Accuracy=0.6258, over 11647.00 frames. ], tot_loss[loss=3.398, ArTop10Accuracy=0.6323, over 11433.20 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,994 INFO [trainer.py:765] (4/8) Epoch 2, batch 700, train_loss[loss=3.473, ArTop10Accuracy=0.6123, over 10182.00 frames. ], tot_loss[loss=3.389, ArTop10Accuracy=0.6341, over 11584.84 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,091 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (4/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29920MB +2024-08-06 03:52:56,542 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,882 INFO [trainer.py:765] (4/8) Epoch 2, batch 800, train_loss[loss=3.328, ArTop10Accuracy=0.6354, over 9929.00 frames. ], tot_loss[loss=3.384, ArTop10Accuracy=0.6354, over 11690.72 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,300 INFO [trainer.py:765] (4/8) Epoch 2, batch 900, train_loss[loss=3.397, ArTop10Accuracy=0.6365, over 12945.00 frames. ], tot_loss[loss=3.373, ArTop10Accuracy=0.6378, over 11753.68 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,810 INFO [trainer.py:765] (4/8) Epoch 2, batch 1000, train_loss[loss=3.329, ArTop10Accuracy=0.6425, over 13029.00 frames. ], tot_loss[loss=3.37, ArTop10Accuracy=0.6384, over 11942.53 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,008 INFO [trainer.py:765] (4/8) Epoch 2, batch 1100, train_loss[loss=3.303, ArTop10Accuracy=0.6404, over 13863.00 frames. ], tot_loss[loss=3.362, ArTop10Accuracy=0.6399, over 11997.75 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,229 INFO [trainer.py:765] (4/8) Epoch 2, batch 1200, train_loss[loss=3.362, ArTop10Accuracy=0.6431, over 12222.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.641, over 11948.98 frames. ], batch size: 98, lr: 2.80e-02 +2024-08-06 03:55:51,347 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 03:57:04,102 INFO [trainer.py:765] (4/8) Epoch 3, batch 100, train_loss[loss=3.283, ArTop10Accuracy=0.6491, over 14263.00 frames. ], tot_loss[loss=3.314, ArTop10Accuracy=0.6485, over 4792.91 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,980 INFO [trainer.py:765] (4/8) Epoch 3, batch 200, train_loss[loss=3.319, ArTop10Accuracy=0.6556, over 13758.00 frames. ], tot_loss[loss=3.29, ArTop10Accuracy=0.654, over 7801.48 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,074 INFO [trainer.py:765] (4/8) Epoch 3, batch 300, train_loss[loss=3.275, ArTop10Accuracy=0.6598, over 14414.00 frames. ], tot_loss[loss=3.278, ArTop10Accuracy=0.6565, over 9416.19 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,253 INFO [trainer.py:765] (4/8) Epoch 3, batch 400, train_loss[loss=3.317, ArTop10Accuracy=0.6463, over 10327.00 frames. ], tot_loss[loss=3.266, ArTop10Accuracy=0.6587, over 10330.98 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,303 INFO [trainer.py:765] (4/8) Epoch 3, batch 500, train_loss[loss=3.173, ArTop10Accuracy=0.6763, over 12317.00 frames. ], tot_loss[loss=3.253, ArTop10Accuracy=0.6615, over 10903.54 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,095 INFO [trainer.py:765] (4/8) Epoch 3, batch 600, train_loss[loss=3.139, ArTop10Accuracy=0.6844, over 11633.00 frames. ], tot_loss[loss=3.241, ArTop10Accuracy=0.6636, over 11427.62 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,059 INFO [trainer.py:765] (4/8) Epoch 3, batch 700, train_loss[loss=3.21, ArTop10Accuracy=0.6746, over 10135.00 frames. ], tot_loss[loss=3.237, ArTop10Accuracy=0.6645, over 11571.97 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,269 INFO [trainer.py:765] (4/8) Epoch 3, batch 800, train_loss[loss=3.159, ArTop10Accuracy=0.6788, over 9202.00 frames. ], tot_loss[loss=3.23, ArTop10Accuracy=0.6654, over 11681.70 frames. ], batch size: 11, lr: 2.59e-02 +2024-08-06 04:02:27,740 INFO [trainer.py:765] (4/8) Epoch 3, batch 900, train_loss[loss=3.134, ArTop10Accuracy=0.6815, over 12832.00 frames. ], tot_loss[loss=3.207, ArTop10Accuracy=0.6698, over 11747.15 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,283 INFO [trainer.py:765] (4/8) Epoch 3, batch 1000, train_loss[loss=3.287, ArTop10Accuracy=0.6597, over 12772.00 frames. ], tot_loss[loss=3.2, ArTop10Accuracy=0.6716, over 11940.12 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (4/8) Epoch 3, batch 1100, train_loss[loss=3.113, ArTop10Accuracy=0.6847, over 13789.00 frames. ], tot_loss[loss=3.194, ArTop10Accuracy=0.6731, over 12007.84 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,312 INFO [trainer.py:765] (4/8) Epoch 3, batch 1200, train_loss[loss=3.147, ArTop10Accuracy=0.6767, over 12224.00 frames. ], tot_loss[loss=3.181, ArTop10Accuracy=0.6751, over 11929.36 frames. ], batch size: 97, lr: 2.54e-02 +2024-08-06 04:04:26,808 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:05:43,368 INFO [trainer.py:765] (4/8) Epoch 4, batch 100, train_loss[loss=3.133, ArTop10Accuracy=0.6894, over 14776.00 frames. ], tot_loss[loss=3.138, ArTop10Accuracy=0.6856, over 4792.99 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,076 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,405 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29920MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,826 INFO [trainer.py:765] (4/8) Epoch 4, batch 200, train_loss[loss=3.072, ArTop10Accuracy=0.692, over 13695.00 frames. ], tot_loss[loss=3.129, ArTop10Accuracy=0.6871, over 7791.61 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,544 INFO [trainer.py:765] (4/8) Epoch 4, batch 300, train_loss[loss=3.117, ArTop10Accuracy=0.6912, over 14420.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6892, over 9424.65 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (4/8) Epoch 4, batch 400, train_loss[loss=3.004, ArTop10Accuracy=0.7135, over 10974.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6895, over 10329.10 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,345 INFO [trainer.py:765] (4/8) Epoch 4, batch 500, train_loss[loss=3.003, ArTop10Accuracy=0.7078, over 12410.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.6904, over 10888.39 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (4/8) Epoch 4, batch 600, train_loss[loss=3.074, ArTop10Accuracy=0.698, over 11480.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.69, over 11423.54 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,502 INFO [trainer.py:765] (4/8) Epoch 4, batch 700, train_loss[loss=2.934, ArTop10Accuracy=0.7253, over 10064.00 frames. ], tot_loss[loss=3.111, ArTop10Accuracy=0.6893, over 11565.47 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,960 INFO [trainer.py:765] (4/8) Epoch 4, batch 800, train_loss[loss=3.062, ArTop10Accuracy=0.6986, over 9211.00 frames. ], tot_loss[loss=3.115, ArTop10Accuracy=0.6885, over 11662.95 frames. ], batch size: 11, lr: 2.30e-02 +2024-08-06 04:11:23,330 INFO [trainer.py:765] (4/8) Epoch 4, batch 900, train_loss[loss=3.079, ArTop10Accuracy=0.6969, over 12860.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6903, over 11725.60 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,827 INFO [trainer.py:765] (4/8) Epoch 4, batch 1000, train_loss[loss=3.122, ArTop10Accuracy=0.6855, over 12963.00 frames. ], tot_loss[loss=3.102, ArTop10Accuracy=0.6909, over 11943.30 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (4/8) Epoch 4, batch 1100, train_loss[loss=3.094, ArTop10Accuracy=0.696, over 13731.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6905, over 12019.48 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,544 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,828 INFO [trainer.py:765] (4/8) Epoch 4, batch 1200, train_loss[loss=3.256, ArTop10Accuracy=0.6624, over 12278.00 frames. ], tot_loss[loss=3.104, ArTop10Accuracy=0.6907, over 11949.92 frames. ], batch size: 97, lr: 2.25e-02 +2024-08-06 04:13:24,234 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (4/8) Epoch 5, batch 100, train_loss[loss=3.132, ArTop10Accuracy=0.6891, over 14611.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.7002, over 4791.65 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (4/8) Epoch 5, batch 200, train_loss[loss=3.104, ArTop10Accuracy=0.6893, over 13796.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7017, over 7789.66 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (4/8) Epoch 5, batch 300, train_loss[loss=3.1, ArTop10Accuracy=0.6933, over 14244.00 frames. ], tot_loss[loss=3.049, ArTop10Accuracy=0.7024, over 9402.86 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,134 INFO [trainer.py:765] (4/8) Epoch 5, batch 400, train_loss[loss=2.921, ArTop10Accuracy=0.7239, over 10393.00 frames. ], tot_loss[loss=3.045, ArTop10Accuracy=0.7031, over 10315.08 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (4/8) Epoch 5, batch 500, train_loss[loss=2.924, ArTop10Accuracy=0.7216, over 12369.00 frames. ], tot_loss[loss=3.04, ArTop10Accuracy=0.7038, over 10903.32 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (4/8) Epoch 5, batch 600, train_loss[loss=3.035, ArTop10Accuracy=0.7049, over 11802.00 frames. ], tot_loss[loss=3.05, ArTop10Accuracy=0.7017, over 11435.13 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,032 INFO [trainer.py:765] (4/8) Epoch 5, batch 700, train_loss[loss=3.013, ArTop10Accuracy=0.7045, over 9967.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7, over 11555.46 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,067 INFO [trainer.py:765] (4/8) Epoch 5, batch 800, train_loss[loss=3.016, ArTop10Accuracy=0.7133, over 10231.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6997, over 11673.77 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,215 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 04:20:27,475 INFO [trainer.py:811] (4/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29920MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (4/8) Epoch 5, batch 900, train_loss[loss=3.028, ArTop10Accuracy=0.7067, over 12863.00 frames. ], tot_loss[loss=3.057, ArTop10Accuracy=0.7, over 11733.67 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (4/8) Epoch 5, batch 1000, train_loss[loss=3.3, ArTop10Accuracy=0.6552, over 13503.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6997, over 11937.43 frames. ], batch size: 28, lr: 2.01e-02 +2024-08-06 04:21:34,452 INFO [trainer.py:765] (4/8) Epoch 5, batch 1100, train_loss[loss=3.113, ArTop10Accuracy=0.6909, over 13946.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6995, over 12003.95 frames. ], batch size: 35, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (4/8) Epoch 5, batch 1200, train_loss[loss=3.16, ArTop10Accuracy=0.6794, over 12279.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6996, over 11943.93 frames. ], batch size: 98, lr: 1.99e-02 +2024-08-06 04:22:30,625 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (4/8) Epoch 6, batch 100, train_loss[loss=3.122, ArTop10Accuracy=0.692, over 14260.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7083, over 4799.96 frames. 
], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,255 INFO [trainer.py:765] (4/8) Epoch 6, batch 200, train_loss[loss=3.061, ArTop10Accuracy=0.7025, over 13976.00 frames. ], tot_loss[loss=3.008, ArTop10Accuracy=0.7102, over 7804.51 frames. ], batch size: 35, lr: 1.84e-02 +2024-08-06 04:25:16,677 INFO [trainer.py:765] (4/8) Epoch 6, batch 300, train_loss[loss=3.058, ArTop10Accuracy=0.6994, over 14201.00 frames. ], tot_loss[loss=3.01, ArTop10Accuracy=0.7096, over 9446.46 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (4/8) Epoch 6, batch 400, train_loss[loss=2.914, ArTop10Accuracy=0.7303, over 11131.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7091, over 10347.44 frames. ], batch size: 15, lr: 1.83e-02 +2024-08-06 04:26:51,485 INFO [trainer.py:765] (4/8) Epoch 6, batch 500, train_loss[loss=2.967, ArTop10Accuracy=0.7124, over 12276.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.709, over 10907.37 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (4/8) Epoch 6, batch 600, train_loss[loss=2.928, ArTop10Accuracy=0.7291, over 11431.00 frames. ], tot_loss[loss=3.02, ArTop10Accuracy=0.7075, over 11430.49 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,370 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,240 INFO [trainer.py:765] (4/8) Epoch 6, batch 700, train_loss[loss=2.982, ArTop10Accuracy=0.7212, over 10084.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7065, over 11570.06 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (4/8) Epoch 6, batch 800, train_loss[loss=3.062, ArTop10Accuracy=0.7029, over 10197.00 frames. ], tot_loss[loss=3.035, ArTop10Accuracy=0.7049, over 11685.27 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,752 INFO [trainer.py:765] (4/8) Epoch 6, batch 900, train_loss[loss=2.966, ArTop10Accuracy=0.7166, over 13025.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7063, over 11719.96 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (4/8) Epoch 6, batch 1000, train_loss[loss=2.927, ArTop10Accuracy=0.7283, over 12929.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7064, over 11927.37 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,383 INFO [trainer.py:765] (4/8) Epoch 6, batch 1100, train_loss[loss=3.131, ArTop10Accuracy=0.6875, over 13936.00 frames. ], tot_loss[loss=3.031, ArTop10Accuracy=0.7056, over 11984.93 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (4/8) Epoch 6, batch 1200, train_loss[loss=3.146, ArTop10Accuracy=0.6814, over 12354.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7069, over 11942.03 frames. ], batch size: 97, lr: 1.76e-02 +2024-08-06 04:31:40,531 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:32:52,404 INFO [trainer.py:765] (4/8) Epoch 7, batch 100, train_loss[loss=3.01, ArTop10Accuracy=0.708, over 14772.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7132, over 4777.61 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,223 INFO [trainer.py:765] (4/8) Epoch 7, batch 200, train_loss[loss=3.02, ArTop10Accuracy=0.7097, over 13816.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7153, over 7783.94 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,608 INFO [trainer.py:765] (4/8) Epoch 7, batch 300, train_loss[loss=3.065, ArTop10Accuracy=0.7053, over 14016.00 frames. ], tot_loss[loss=2.978, ArTop10Accuracy=0.7167, over 9409.69 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,846 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 04:34:45,808 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,809 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30637MB +2024-08-06 04:34:46,125 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,147 INFO [trainer.py:765] (4/8) Epoch 7, batch 400, train_loss[loss=2.883, ArTop10Accuracy=0.7337, over 10409.00 frames. ], tot_loss[loss=2.984, ArTop10Accuracy=0.7152, over 10324.52 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (4/8) Epoch 7, batch 500, train_loss[loss=3.003, ArTop10Accuracy=0.7134, over 12279.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7156, over 10900.54 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (4/8) Epoch 7, batch 600, train_loss[loss=2.925, ArTop10Accuracy=0.7251, over 11571.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7144, over 11435.96 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (4/8) Epoch 7, batch 700, train_loss[loss=2.839, ArTop10Accuracy=0.7372, over 10210.00 frames. ], tot_loss[loss=2.989, ArTop10Accuracy=0.7138, over 11590.32 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,613 INFO [trainer.py:765] (4/8) Epoch 7, batch 800, train_loss[loss=2.971, ArTop10Accuracy=0.7187, over 10144.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7125, over 11703.22 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (4/8) Epoch 7, batch 900, train_loss[loss=3.03, ArTop10Accuracy=0.7042, over 12925.00 frames. ], tot_loss[loss=2.986, ArTop10Accuracy=0.7145, over 11749.92 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,575 INFO [trainer.py:765] (4/8) Epoch 7, batch 1000, train_loss[loss=2.882, ArTop10Accuracy=0.7297, over 13000.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7141, over 11947.80 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (4/8) Epoch 7, batch 1100, train_loss[loss=2.899, ArTop10Accuracy=0.7268, over 13606.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7124, over 12012.42 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,990 INFO [trainer.py:765] (4/8) Epoch 7, batch 1200, train_loss[loss=3.131, ArTop10Accuracy=0.6906, over 11600.00 frames. ], tot_loss[loss=2.996, ArTop10Accuracy=0.7125, over 11954.33 frames. ], batch size: 98, lr: 1.57e-02 +2024-08-06 04:40:43,358 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,372 INFO [trainer.py:765] (4/8) Epoch 8, batch 100, train_loss[loss=3.1, ArTop10Accuracy=0.6938, over 14501.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.72, over 4786.83 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (4/8) Epoch 8, batch 200, train_loss[loss=2.889, ArTop10Accuracy=0.7374, over 13851.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.72, over 7801.74 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,046 INFO [trainer.py:765] (4/8) Epoch 8, batch 300, train_loss[loss=2.986, ArTop10Accuracy=0.713, over 14154.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.721, over 9413.82 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,462 INFO [trainer.py:765] (4/8) Epoch 8, batch 400, train_loss[loss=2.864, ArTop10Accuracy=0.7329, over 10910.00 frames. ], tot_loss[loss=2.955, ArTop10Accuracy=0.7213, over 10338.72 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (4/8) Epoch 8, batch 500, train_loss[loss=2.845, ArTop10Accuracy=0.7395, over 12286.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.7203, over 10909.66 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (4/8) Epoch 8, batch 600, train_loss[loss=2.96, ArTop10Accuracy=0.7252, over 11530.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7194, over 11434.01 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,038 INFO [trainer.py:765] (4/8) Epoch 8, batch 700, train_loss[loss=3.068, ArTop10Accuracy=0.6944, over 10155.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7181, over 11588.80 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,208 INFO [trainer.py:765] (4/8) Epoch 8, batch 800, train_loss[loss=2.976, ArTop10Accuracy=0.7146, over 10233.00 frames. ], tot_loss[loss=2.979, ArTop10Accuracy=0.716, over 11691.92 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,606 INFO [trainer.py:765] (4/8) Epoch 8, batch 900, train_loss[loss=3.003, ArTop10Accuracy=0.7131, over 13057.00 frames. ], tot_loss[loss=2.975, ArTop10Accuracy=0.7167, over 11737.58 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,033 INFO [trainer.py:765] (4/8) Epoch 8, batch 1000, train_loss[loss=3.01, ArTop10Accuracy=0.7155, over 12863.00 frames. ], tot_loss[loss=2.975, ArTop10Accuracy=0.7168, over 11934.73 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,827 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (4/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 30637MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,931 INFO [trainer.py:765] (4/8) Epoch 8, batch 1100, train_loss[loss=2.965, ArTop10Accuracy=0.7182, over 13839.00 frames. ], tot_loss[loss=2.977, ArTop10Accuracy=0.7163, over 11996.48 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (4/8) Epoch 8, batch 1200, train_loss[loss=3.102, ArTop10Accuracy=0.6867, over 11927.00 frames. ], tot_loss[loss=2.976, ArTop10Accuracy=0.7165, over 11938.45 frames. ], batch size: 97, lr: 1.40e-02 +2024-08-06 04:49:48,811 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:51:01,547 INFO [trainer.py:765] (4/8) Epoch 9, batch 100, train_loss[loss=3.046, ArTop10Accuracy=0.7049, over 14359.00 frames. ], tot_loss[loss=2.936, ArTop10Accuracy=0.7252, over 4789.11 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (4/8) Epoch 9, batch 200, train_loss[loss=2.947, ArTop10Accuracy=0.7281, over 13636.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7262, over 7790.54 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (4/8) Epoch 9, batch 300, train_loss[loss=2.944, ArTop10Accuracy=0.7193, over 14377.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.726, over 9417.20 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (4/8) Epoch 9, batch 400, train_loss[loss=2.759, ArTop10Accuracy=0.7496, over 10397.00 frames. ], tot_loss[loss=2.929, ArTop10Accuracy=0.726, over 10330.64 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (4/8) Epoch 9, batch 500, train_loss[loss=2.922, ArTop10Accuracy=0.7289, over 12308.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7255, over 10916.58 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,076 INFO [trainer.py:765] (4/8) Epoch 9, batch 600, train_loss[loss=2.953, ArTop10Accuracy=0.72, over 11601.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7243, over 11434.27 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,398 INFO [trainer.py:765] (4/8) Epoch 9, batch 700, train_loss[loss=2.89, ArTop10Accuracy=0.7233, over 10423.00 frames. ], tot_loss[loss=2.943, ArTop10Accuracy=0.7231, over 11585.46 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,573 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,596 INFO [trainer.py:765] (4/8) Epoch 9, batch 800, train_loss[loss=2.943, ArTop10Accuracy=0.7266, over 10072.00 frames. ], tot_loss[loss=2.951, ArTop10Accuracy=0.7217, over 11700.92 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,974 INFO [trainer.py:765] (4/8) Epoch 9, batch 900, train_loss[loss=2.991, ArTop10Accuracy=0.7131, over 12851.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7226, over 11745.92 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,489 INFO [trainer.py:765] (4/8) Epoch 9, batch 1000, train_loss[loss=2.907, ArTop10Accuracy=0.7311, over 12892.00 frames. ], tot_loss[loss=2.944, ArTop10Accuracy=0.7227, over 11932.99 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,655 INFO [trainer.py:765] (4/8) Epoch 9, batch 1100, train_loss[loss=2.979, ArTop10Accuracy=0.7163, over 13811.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7218, over 12008.16 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,092 INFO [trainer.py:765] (4/8) Epoch 9, batch 1200, train_loss[loss=3.042, ArTop10Accuracy=0.7009, over 11821.00 frames. ], tot_loss[loss=2.951, ArTop10Accuracy=0.7213, over 11934.24 frames. ], batch size: 98, lr: 1.27e-02 +2024-08-06 04:58:43,095 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 04:59:52,749 INFO [trainer.py:765] (4/8) Epoch 10, batch 100, train_loss[loss=2.92, ArTop10Accuracy=0.7225, over 14409.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7286, over 4787.17 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,729 INFO [trainer.py:765] (4/8) Epoch 10, batch 200, train_loss[loss=2.916, ArTop10Accuracy=0.7339, over 14185.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7298, over 7792.68 frames. 
], batch size: 35, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (4/8) Epoch 10, batch 300, train_loss[loss=3.034, ArTop10Accuracy=0.7081, over 14117.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.7296, over 9422.72 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (4/8) Epoch 10, batch 400, train_loss[loss=2.807, ArTop10Accuracy=0.7502, over 10796.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.729, over 10324.66 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,487 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 05:02:55,378 INFO [trainer.py:811] (4/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32942MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (4/8) Epoch 10, batch 500, train_loss[loss=2.784, ArTop10Accuracy=0.7519, over 12245.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7288, over 10867.87 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (4/8) Epoch 10, batch 600, train_loss[loss=2.767, ArTop10Accuracy=0.7542, over 11611.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7275, over 11413.89 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (4/8) Epoch 10, batch 700, train_loss[loss=2.904, ArTop10Accuracy=0.7148, over 10314.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7261, over 11583.91 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (4/8) Epoch 10, batch 800, train_loss[loss=2.947, ArTop10Accuracy=0.7253, over 10171.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7246, over 11697.08 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (4/8) Epoch 10, batch 900, train_loss[loss=2.99, ArTop10Accuracy=0.7181, over 13227.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7256, over 11744.68 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,843 INFO [trainer.py:765] (4/8) Epoch 10, batch 1000, train_loss[loss=2.793, ArTop10Accuracy=0.7464, over 13076.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7249, over 11951.21 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (4/8) Epoch 10, batch 1100, train_loss[loss=2.991, ArTop10Accuracy=0.717, over 13592.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7242, over 11989.96 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,483 INFO [trainer.py:765] (4/8) Epoch 10, batch 1200, train_loss[loss=3.102, ArTop10Accuracy=0.6922, over 11832.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7235, over 11929.40 frames. ], batch size: 98, lr: 1.16e-02 +2024-08-06 05:07:40,420 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:08:52,967 INFO [trainer.py:765] (4/8) Epoch 11, batch 100, train_loss[loss=3.014, ArTop10Accuracy=0.7123, over 14503.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7338, over 4797.82 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,278 INFO [trainer.py:765] (4/8) Epoch 11, batch 200, train_loss[loss=2.777, ArTop10Accuracy=0.755, over 13762.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7331, over 7798.94 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,721 INFO [trainer.py:765] (4/8) Epoch 11, batch 300, train_loss[loss=2.941, ArTop10Accuracy=0.7218, over 14035.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7327, over 9418.51 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (4/8) Epoch 11, batch 400, train_loss[loss=2.832, ArTop10Accuracy=0.7506, over 10417.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7324, over 10333.06 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 05:11:52,693 INFO [trainer.py:765] (4/8) Epoch 11, batch 500, train_loss[loss=2.955, ArTop10Accuracy=0.7212, over 12237.00 frames. ], tot_loss[loss=2.895, ArTop10Accuracy=0.7326, over 10913.42 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,288 INFO [trainer.py:765] (4/8) Epoch 11, batch 600, train_loss[loss=2.901, ArTop10Accuracy=0.7333, over 11485.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7313, over 11425.25 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,709 INFO [trainer.py:765] (4/8) Epoch 11, batch 700, train_loss[loss=2.755, ArTop10Accuracy=0.7483, over 10608.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7301, over 11559.54 frames. ], batch size: 13, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (4/8) Epoch 11, batch 800, train_loss[loss=2.85, ArTop10Accuracy=0.7346, over 10032.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7279, over 11661.43 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,667 INFO [trainer.py:765] (4/8) Epoch 11, batch 900, train_loss[loss=2.894, ArTop10Accuracy=0.7351, over 13236.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7303, over 11721.96 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,264 INFO [trainer.py:765] (4/8) Epoch 11, batch 1000, train_loss[loss=2.833, ArTop10Accuracy=0.753, over 13326.00 frames. ], tot_loss[loss=2.911, ArTop10Accuracy=0.7296, over 11924.43 frames. ], batch size: 28, lr: 1.07e-02 +2024-08-06 05:15:38,260 INFO [trainer.py:765] (4/8) Epoch 11, batch 1100, train_loss[loss=2.981, ArTop10Accuracy=0.7222, over 13836.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7279, over 11984.42 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,498 INFO [trainer.py:765] (4/8) Epoch 11, batch 1200, train_loss[loss=2.997, ArTop10Accuracy=0.7096, over 11635.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7275, over 11941.26 frames. ], batch size: 98, lr: 1.06e-02 +2024-08-06 05:16:12,698 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 05:16:21,623 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32942MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,439 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:18:03,006 INFO [trainer.py:765] (4/8) Epoch 12, batch 100, train_loss[loss=2.995, ArTop10Accuracy=0.7175, over 14578.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7341, over 4791.99 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,005 INFO [trainer.py:765] (4/8) Epoch 12, batch 200, train_loss[loss=2.937, ArTop10Accuracy=0.7235, over 13602.00 frames. ], tot_loss[loss=2.882, ArTop10Accuracy=0.7365, over 7795.10 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,947 INFO [trainer.py:765] (4/8) Epoch 12, batch 300, train_loss[loss=2.866, ArTop10Accuracy=0.7396, over 14205.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.738, over 9417.28 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (4/8) Epoch 12, batch 400, train_loss[loss=2.741, ArTop10Accuracy=0.7659, over 11054.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7375, over 10333.75 frames. ], batch size: 15, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (4/8) Epoch 12, batch 500, train_loss[loss=2.931, ArTop10Accuracy=0.727, over 12246.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7368, over 10903.20 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,916 INFO [trainer.py:765] (4/8) Epoch 12, batch 600, train_loss[loss=2.776, ArTop10Accuracy=0.7518, over 11625.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7354, over 11430.94 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,207 INFO [trainer.py:765] (4/8) Epoch 12, batch 700, train_loss[loss=2.796, ArTop10Accuracy=0.7455, over 9981.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7336, over 11572.71 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,912 INFO [trainer.py:765] (4/8) Epoch 12, batch 800, train_loss[loss=2.787, ArTop10Accuracy=0.7566, over 10029.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7316, over 11695.56 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (4/8) Epoch 12, batch 900, train_loss[loss=2.869, ArTop10Accuracy=0.7448, over 13351.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7328, over 11744.65 frames. ], batch size: 28, lr: 9.87e-03 +2024-08-06 05:23:54,576 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,346 INFO [trainer.py:765] (4/8) Epoch 12, batch 1000, train_loss[loss=2.969, ArTop10Accuracy=0.7246, over 12941.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7325, over 11941.33 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,502 INFO [trainer.py:765] (4/8) Epoch 12, batch 1100, train_loss[loss=2.98, ArTop10Accuracy=0.7142, over 13724.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7308, over 11995.55 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (4/8) Epoch 12, batch 1200, train_loss[loss=2.983, ArTop10Accuracy=0.7175, over 12501.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7304, over 11941.95 frames. ], batch size: 98, lr: 9.78e-03 +2024-08-06 05:25:41,512 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:26:46,787 INFO [trainer.py:765] (4/8) Epoch 13, batch 100, train_loss[loss=3.03, ArTop10Accuracy=0.7077, over 14498.00 frames. ], tot_loss[loss=2.879, ArTop10Accuracy=0.7371, over 4788.09 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,553 INFO [trainer.py:765] (4/8) Epoch 13, batch 200, train_loss[loss=2.871, ArTop10Accuracy=0.7345, over 14013.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7383, over 7795.66 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,035 INFO [trainer.py:765] (4/8) Epoch 13, batch 300, train_loss[loss=2.867, ArTop10Accuracy=0.7396, over 14334.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7386, over 9417.30 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (4/8) Epoch 13, batch 400, train_loss[loss=2.781, ArTop10Accuracy=0.7557, over 10381.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7381, over 10331.89 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (4/8) Epoch 13, batch 500, train_loss[loss=2.945, ArTop10Accuracy=0.7194, over 12237.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7384, over 10902.84 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,248 INFO [trainer.py:765] (4/8) Epoch 13, batch 600, train_loss[loss=2.913, ArTop10Accuracy=0.7364, over 11359.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7371, over 11432.37 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (4/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,055 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 32942MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (4/8) Epoch 13, batch 700, train_loss[loss=2.865, ArTop10Accuracy=0.7461, over 9935.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7357, over 11580.84 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (4/8) Epoch 13, batch 800, train_loss[loss=2.941, ArTop10Accuracy=0.7319, over 10135.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7343, over 11706.48 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,521 INFO [trainer.py:765] (4/8) Epoch 13, batch 900, train_loss[loss=2.951, ArTop10Accuracy=0.7203, over 13266.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.735, over 11751.66 frames. ], batch size: 28, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (4/8) Epoch 13, batch 1000, train_loss[loss=2.788, ArTop10Accuracy=0.7514, over 13053.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7346, over 11941.59 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,233 INFO [trainer.py:765] (4/8) Epoch 13, batch 1100, train_loss[loss=2.952, ArTop10Accuracy=0.7256, over 13553.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7333, over 11999.98 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (4/8) Epoch 13, batch 1200, train_loss[loss=3.004, ArTop10Accuracy=0.7117, over 11857.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7334, over 11927.37 frames. ], batch size: 97, lr: 9.07e-03 +2024-08-06 05:34:30,148 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:35:39,197 INFO [trainer.py:765] (4/8) Epoch 14, batch 100, train_loss[loss=2.914, ArTop10Accuracy=0.7354, over 14784.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7397, over 4792.52 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,062 INFO [trainer.py:765] (4/8) Epoch 14, batch 200, train_loss[loss=2.873, ArTop10Accuracy=0.7373, over 13689.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7407, over 7786.48 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (4/8) Epoch 14, batch 300, train_loss[loss=2.914, ArTop10Accuracy=0.7293, over 14205.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7405, over 9433.01 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,030 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,138 INFO [trainer.py:765] (4/8) Epoch 14, batch 400, train_loss[loss=2.744, ArTop10Accuracy=0.7562, over 10774.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7417, over 10353.48 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 05:38:42,024 INFO [trainer.py:765] (4/8) Epoch 14, batch 500, train_loss[loss=2.787, ArTop10Accuracy=0.7473, over 12525.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7404, over 10925.69 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,373 INFO [trainer.py:765] (4/8) Epoch 14, batch 600, train_loss[loss=2.902, ArTop10Accuracy=0.7255, over 11670.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7391, over 11447.81 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (4/8) Epoch 14, batch 700, train_loss[loss=2.72, ArTop10Accuracy=0.761, over 10090.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.738, over 11586.10 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,134 INFO [trainer.py:765] (4/8) Epoch 14, batch 800, train_loss[loss=2.656, ArTop10Accuracy=0.7718, over 10274.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7375, over 11691.23 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,466 INFO [trainer.py:765] (4/8) Epoch 14, batch 900, train_loss[loss=2.849, ArTop10Accuracy=0.7388, over 12757.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.7387, over 11734.65 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,995 INFO [trainer.py:765] (4/8) Epoch 14, batch 1000, train_loss[loss=2.883, ArTop10Accuracy=0.7369, over 12862.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.737, over 11936.11 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,216 INFO [trainer.py:765] (4/8) Epoch 14, batch 1100, train_loss[loss=2.931, ArTop10Accuracy=0.7257, over 14038.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7356, over 12006.76 frames. ], batch size: 35, lr: 8.48e-03 +2024-08-06 05:42:53,548 INFO [trainer.py:765] (4/8) Epoch 14, batch 1200, train_loss[loss=3.039, ArTop10Accuracy=0.7046, over 12044.00 frames. ], tot_loss[loss=2.886, ArTop10Accuracy=0.7349, over 11944.72 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:18,087 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:44:28,571 INFO [trainer.py:765] (4/8) Epoch 15, batch 100, train_loss[loss=2.97, ArTop10Accuracy=0.7199, over 14461.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7413, over 4763.63 frames. ], batch size: 61, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 05:44:38,024 INFO [trainer.py:811] (4/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 33238MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,185 INFO [trainer.py:765] (4/8) Epoch 15, batch 200, train_loss[loss=2.787, ArTop10Accuracy=0.757, over 13880.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7426, over 7772.12 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,648 INFO [trainer.py:765] (4/8) Epoch 15, batch 300, train_loss[loss=2.902, ArTop10Accuracy=0.7249, over 14280.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7426, over 9415.78 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,904 INFO [trainer.py:765] (4/8) Epoch 15, batch 400, train_loss[loss=2.692, ArTop10Accuracy=0.7702, over 10831.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7435, over 10349.33 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (4/8) Epoch 15, batch 500, train_loss[loss=2.804, ArTop10Accuracy=0.7557, over 12268.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7434, over 10917.90 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (4/8) Epoch 15, batch 600, train_loss[loss=2.857, ArTop10Accuracy=0.7341, over 11641.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7425, over 11450.41 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,855 INFO [trainer.py:765] (4/8) Epoch 15, batch 700, train_loss[loss=2.868, ArTop10Accuracy=0.7409, over 9931.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7406, over 11580.75 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,779 INFO [trainer.py:765] (4/8) Epoch 15, batch 800, train_loss[loss=2.734, ArTop10Accuracy=0.7718, over 10211.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7401, over 11689.06 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 05:50:17,211 INFO [trainer.py:765] (4/8) Epoch 15, batch 900, train_loss[loss=2.873, ArTop10Accuracy=0.7339, over 12911.00 frames. ], tot_loss[loss=2.851, ArTop10Accuracy=0.7414, over 11735.84 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (4/8) Epoch 15, batch 1000, train_loss[loss=2.654, ArTop10Accuracy=0.7785, over 13117.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7409, over 11930.10 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (4/8) Epoch 15, batch 1100, train_loss[loss=2.803, ArTop10Accuracy=0.7572, over 13762.00 frames. ], tot_loss[loss=2.867, ArTop10Accuracy=0.7385, over 12001.61 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (4/8) Epoch 15, batch 1200, train_loss[loss=2.988, ArTop10Accuracy=0.7164, over 12044.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.7378, over 11955.03 frames. ], batch size: 97, lr: 7.91e-03 +2024-08-06 05:52:17,883 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (4/8) Epoch 16, batch 100, train_loss[loss=2.902, ArTop10Accuracy=0.7419, over 15023.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7448, over 4782.46 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,877 INFO [trainer.py:765] (4/8) Epoch 16, batch 200, train_loss[loss=2.861, ArTop10Accuracy=0.7354, over 13694.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.745, over 7807.21 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (4/8) Epoch 16, batch 300, train_loss[loss=2.951, ArTop10Accuracy=0.7253, over 14130.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7461, over 9439.21 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,931 INFO [trainer.py:765] (4/8) Epoch 16, batch 400, train_loss[loss=2.679, ArTop10Accuracy=0.7733, over 10924.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7448, over 10333.05 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (4/8) Epoch 16, batch 500, train_loss[loss=2.912, ArTop10Accuracy=0.7288, over 12157.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7449, over 10905.38 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,440 INFO [trainer.py:765] (4/8) Epoch 16, batch 600, train_loss[loss=2.699, ArTop10Accuracy=0.7707, over 11768.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.744, over 11431.19 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (4/8) Epoch 16, batch 700, train_loss[loss=2.786, ArTop10Accuracy=0.752, over 10125.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7422, over 11566.77 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (4/8) Epoch 16, batch 800, train_loss[loss=2.767, ArTop10Accuracy=0.7575, over 10167.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7416, over 11688.81 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,568 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (4/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 33238MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (4/8) Epoch 16, batch 900, train_loss[loss=2.802, ArTop10Accuracy=0.7577, over 12835.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7429, over 11726.97 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (4/8) Epoch 16, batch 1000, train_loss[loss=2.798, ArTop10Accuracy=0.7568, over 12999.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7426, over 11948.45 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,091 INFO [trainer.py:765] (4/8) Epoch 16, batch 1100, train_loss[loss=2.849, ArTop10Accuracy=0.7447, over 13693.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7411, over 12004.57 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,463 INFO [trainer.py:765] (4/8) Epoch 16, batch 1200, train_loss[loss=3.004, ArTop10Accuracy=0.7118, over 13264.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7405, over 11946.92 frames. ], batch size: 98, lr: 7.43e-03 +2024-08-06 06:01:12,467 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:02:27,259 INFO [trainer.py:765] (4/8) Epoch 17, batch 100, train_loss[loss=2.866, ArTop10Accuracy=0.7415, over 14469.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7478, over 4786.35 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,850 INFO [trainer.py:765] (4/8) Epoch 17, batch 200, train_loss[loss=2.892, ArTop10Accuracy=0.7381, over 14098.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7485, over 7793.44 frames. ], batch size: 35, lr: 7.17e-03 +2024-08-06 06:03:57,501 INFO [trainer.py:765] (4/8) Epoch 17, batch 300, train_loss[loss=2.908, ArTop10Accuracy=0.7312, over 14365.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7482, over 9434.75 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,837 INFO [trainer.py:765] (4/8) Epoch 17, batch 400, train_loss[loss=2.747, ArTop10Accuracy=0.7647, over 10579.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7474, over 10361.54 frames. ], batch size: 14, lr: 7.13e-03 +2024-08-06 06:05:29,003 INFO [trainer.py:765] (4/8) Epoch 17, batch 500, train_loss[loss=2.809, ArTop10Accuracy=0.7488, over 12553.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7469, over 10905.70 frames. ], batch size: 23, lr: 7.12e-03 +2024-08-06 06:05:49,550 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,722 INFO [trainer.py:765] (4/8) Epoch 17, batch 600, train_loss[loss=2.815, ArTop10Accuracy=0.7519, over 11660.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.746, over 11441.25 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,693 INFO [trainer.py:765] (4/8) Epoch 17, batch 700, train_loss[loss=2.738, ArTop10Accuracy=0.771, over 10293.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7447, over 11580.38 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,895 INFO [trainer.py:765] (4/8) Epoch 17, batch 800, train_loss[loss=2.772, ArTop10Accuracy=0.7552, over 9968.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7441, over 11696.32 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,383 INFO [trainer.py:765] (4/8) Epoch 17, batch 900, train_loss[loss=2.857, ArTop10Accuracy=0.7439, over 13384.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7448, over 11742.74 frames. ], batch size: 28, lr: 7.05e-03 +2024-08-06 06:08:47,994 INFO [trainer.py:765] (4/8) Epoch 17, batch 1000, train_loss[loss=2.774, ArTop10Accuracy=0.7556, over 13306.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7445, over 11941.86 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,133 INFO [trainer.py:765] (4/8) Epoch 17, batch 1100, train_loss[loss=2.923, ArTop10Accuracy=0.7303, over 13604.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7429, over 12008.74 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,443 INFO [trainer.py:765] (4/8) Epoch 17, batch 1200, train_loss[loss=2.971, ArTop10Accuracy=0.7165, over 12541.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7426, over 11957.92 frames. ], batch size: 97, lr: 7.01e-03 +2024-08-06 06:10:14,192 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:11:23,101 INFO [trainer.py:765] (4/8) Epoch 18, batch 100, train_loss[loss=2.824, ArTop10Accuracy=0.7428, over 14456.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7484, over 4780.52 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,259 INFO [trainer.py:765] (4/8) Epoch 18, batch 200, train_loss[loss=2.87, ArTop10Accuracy=0.7447, over 13597.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7498, over 7800.42 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,317 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 33238MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (4/8) Epoch 18, batch 300, train_loss[loss=2.816, ArTop10Accuracy=0.7503, over 14516.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7505, over 9424.59 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,096 INFO [trainer.py:765] (4/8) Epoch 18, batch 400, train_loss[loss=2.891, ArTop10Accuracy=0.7351, over 10161.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.751, over 10320.29 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,487 INFO [trainer.py:765] (4/8) Epoch 18, batch 500, train_loss[loss=2.789, ArTop10Accuracy=0.7508, over 12243.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7519, over 10882.79 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,627 INFO [trainer.py:765] (4/8) Epoch 18, batch 600, train_loss[loss=2.767, ArTop10Accuracy=0.7578, over 11713.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7493, over 11403.00 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,341 INFO [trainer.py:765] (4/8) Epoch 18, batch 700, train_loss[loss=2.836, ArTop10Accuracy=0.7506, over 10410.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7489, over 11550.81 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (4/8) Epoch 18, batch 800, train_loss[loss=2.698, ArTop10Accuracy=0.7731, over 10095.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7476, over 11683.04 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,912 INFO [trainer.py:765] (4/8) Epoch 18, batch 900, train_loss[loss=2.871, ArTop10Accuracy=0.7433, over 12882.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7473, over 11726.41 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,528 INFO [trainer.py:765] (4/8) Epoch 18, batch 1000, train_loss[loss=2.808, ArTop10Accuracy=0.7565, over 12953.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7468, over 11928.60 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,662 INFO [trainer.py:765] (4/8) Epoch 18, batch 1100, train_loss[loss=2.871, ArTop10Accuracy=0.7413, over 13895.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7448, over 11988.56 frames. ], batch size: 35, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (4/8) Epoch 18, batch 1200, train_loss[loss=2.961, ArTop10Accuracy=0.7208, over 12786.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7445, over 11952.21 frames. ], batch size: 97, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,497 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:20:29,729 INFO [trainer.py:765] (4/8) Epoch 19, batch 100, train_loss[loss=2.793, ArTop10Accuracy=0.7492, over 14350.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.7513, over 4780.65 frames. 
], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,275 INFO [trainer.py:765] (4/8) Epoch 19, batch 200, train_loss[loss=2.742, ArTop10Accuracy=0.761, over 13768.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7522, over 7786.94 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 06:21:56,079 INFO [trainer.py:765] (4/8) Epoch 19, batch 300, train_loss[loss=2.83, ArTop10Accuracy=0.7448, over 14055.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7517, over 9412.47 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (4/8) Epoch 19, batch 400, train_loss[loss=2.694, ArTop10Accuracy=0.778, over 10296.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7522, over 10326.23 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 06:23:18,998 INFO [trainer.py:765] (4/8) Epoch 19, batch 500, train_loss[loss=2.769, ArTop10Accuracy=0.7601, over 12104.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7529, over 10892.66 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (4/8) Epoch 19, batch 600, train_loss[loss=2.779, ArTop10Accuracy=0.7515, over 11587.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7518, over 11420.60 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,186 INFO [trainer.py:765] (4/8) Epoch 19, batch 700, train_loss[loss=2.725, ArTop10Accuracy=0.7759, over 10144.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7497, over 11561.76 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,356 INFO [trainer.py:765] (4/8) Epoch 19, batch 800, train_loss[loss=2.807, ArTop10Accuracy=0.7435, over 9925.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.749, over 11666.59 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,625 INFO [trainer.py:765] (4/8) Epoch 19, batch 900, train_loss[loss=2.755, ArTop10Accuracy=0.7641, over 12840.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7497, over 11723.49 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,773 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (4/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,765 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 33238MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,030 INFO [trainer.py:765] (4/8) Epoch 19, batch 1000, train_loss[loss=2.764, ArTop10Accuracy=0.753, over 12956.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7488, over 11930.35 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,189 INFO [trainer.py:765] (4/8) Epoch 19, batch 1100, train_loss[loss=2.803, ArTop10Accuracy=0.7524, over 13621.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7468, over 11993.90 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,453 INFO [trainer.py:765] (4/8) Epoch 19, batch 1200, train_loss[loss=2.963, ArTop10Accuracy=0.7257, over 12311.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7464, over 11940.68 frames. ], batch size: 98, lr: 6.28e-03 +2024-08-06 06:28:00,636 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:29:08,985 INFO [trainer.py:765] (4/8) Epoch 20, batch 100, train_loss[loss=2.816, ArTop10Accuracy=0.7457, over 14013.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7538, over 4798.63 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,319 INFO [trainer.py:765] (4/8) Epoch 20, batch 200, train_loss[loss=2.789, ArTop10Accuracy=0.758, over 13533.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7548, over 7780.90 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,106 INFO [trainer.py:765] (4/8) Epoch 20, batch 300, train_loss[loss=2.871, ArTop10Accuracy=0.7379, over 14234.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7538, over 9409.96 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,354 INFO [trainer.py:765] (4/8) Epoch 20, batch 400, train_loss[loss=2.674, ArTop10Accuracy=0.7735, over 11005.00 frames. ], tot_loss[loss=2.796, ArTop10Accuracy=0.7535, over 10335.45 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (4/8) Epoch 20, batch 500, train_loss[loss=2.781, ArTop10Accuracy=0.7579, over 12511.00 frames. ], tot_loss[loss=2.784, ArTop10Accuracy=0.7552, over 10916.03 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,358 INFO [trainer.py:765] (4/8) Epoch 20, batch 600, train_loss[loss=2.757, ArTop10Accuracy=0.765, over 11461.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7538, over 11435.31 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,752 INFO [trainer.py:765] (4/8) Epoch 20, batch 700, train_loss[loss=2.77, ArTop10Accuracy=0.7605, over 9976.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7518, over 11573.42 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:43,829 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (4/8) Epoch 20, batch 800, train_loss[loss=2.664, ArTop10Accuracy=0.7766, over 10023.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7505, over 11695.94 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (4/8) Epoch 20, batch 900, train_loss[loss=2.768, ArTop10Accuracy=0.7583, over 13051.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7519, over 11734.50 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (4/8) Epoch 20, batch 1000, train_loss[loss=2.848, ArTop10Accuracy=0.7418, over 12945.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7505, over 11946.05 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,215 INFO [trainer.py:765] (4/8) Epoch 20, batch 1100, train_loss[loss=2.881, ArTop10Accuracy=0.7405, over 13812.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.749, over 11985.54 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (4/8) Epoch 20, batch 1200, train_loss[loss=2.913, ArTop10Accuracy=0.7331, over 12200.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7492, over 11945.43 frames. ], batch size: 99, lr: 5.97e-03 +2024-08-06 06:36:42,590 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:36:42,593 INFO [trainer.py:1069] (4/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-5 b/libritts/log/log-train-2024-08-06-03-39-40-5 new file mode 100644 index 0000000000000000000000000000000000000000..d9dbb9df6cbdd3378d98d533ffae0d38a67f25a7 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-5 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,357 INFO [trainer.py:870] (5/8) Training started +2024-08-06 03:39:40,358 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 03:39:40,358 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,358 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 03:39:41,131 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,936 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 03:39:43,994 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 03:39:43,996 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 03:39:43,997 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 03:39:43,997 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 03:39:43,998 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,608 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 03:39:44,608 INFO [datamodule.py:367] (5/8) About to create dev dataset +2024-08-06 03:39:44,938 INFO [datamodule.py:388] (5/8) About to create dev 
dataloader +2024-08-06 03:40:39,571 INFO [trainer.py:765] (5/8) Epoch 1, batch 100, train_loss[loss=4.067, ArTop10Accuracy=0.5153, over 14698.00 frames. ], tot_loss[loss=4.768, ArTop10Accuracy=0.3985, over 4803.94 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,923 INFO [trainer.py:765] (5/8) Epoch 1, batch 200, train_loss[loss=3.914, ArTop10Accuracy=0.5413, over 13982.00 frames. ], tot_loss[loss=4.296, ArTop10Accuracy=0.4768, over 7819.22 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,951 INFO [trainer.py:765] (5/8) Epoch 1, batch 300, train_loss[loss=3.74, ArTop10Accuracy=0.5657, over 14595.00 frames. ], tot_loss[loss=4.092, ArTop10Accuracy=0.51, over 9441.11 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,080 INFO [trainer.py:765] (5/8) Epoch 1, batch 400, train_loss[loss=3.832, ArTop10Accuracy=0.544, over 11085.00 frames. ], tot_loss[loss=3.943, ArTop10Accuracy=0.5348, over 10345.48 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 03:43:11,272 INFO [trainer.py:765] (5/8) Epoch 1, batch 500, train_loss[loss=3.575, ArTop10Accuracy=0.6015, over 12279.00 frames. ], tot_loss[loss=3.827, ArTop10Accuracy=0.554, over 10905.73 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,593 INFO [trainer.py:765] (5/8) Epoch 1, batch 600, train_loss[loss=3.53, ArTop10Accuracy=0.6034, over 11551.00 frames. ], tot_loss[loss=3.743, ArTop10Accuracy=0.5684, over 11432.17 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,899 INFO [trainer.py:765] (5/8) Epoch 1, batch 700, train_loss[loss=3.449, ArTop10Accuracy=0.6251, over 10225.00 frames. ], tot_loss[loss=3.684, ArTop10Accuracy=0.5786, over 11587.85 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,514 INFO [trainer.py:765] (5/8) Epoch 1, batch 800, train_loss[loss=3.53, ArTop10Accuracy=0.6113, over 10199.00 frames. ], tot_loss[loss=3.637, ArTop10Accuracy=0.5872, over 11703.53 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,556 INFO [trainer.py:765] (5/8) Epoch 1, batch 900, train_loss[loss=3.565, ArTop10Accuracy=0.5989, over 12863.00 frames. ], tot_loss[loss=3.592, ArTop10Accuracy=0.5956, over 11754.75 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,648 INFO [trainer.py:765] (5/8) Epoch 1, batch 1000, train_loss[loss=3.418, ArTop10Accuracy=0.6271, over 13368.00 frames. ], tot_loss[loss=3.562, ArTop10Accuracy=0.6009, over 11949.64 frames. ], batch size: 28, lr: 2.97e-02 +2024-08-06 03:46:07,989 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,611 INFO [trainer.py:765] (5/8) Epoch 1, batch 1100, train_loss[loss=3.506, ArTop10Accuracy=0.6104, over 13606.00 frames. ], tot_loss[loss=3.532, ArTop10Accuracy=0.6068, over 12001.98 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,744 INFO [trainer.py:765] (5/8) Epoch 1, batch 1200, train_loss[loss=3.554, ArTop10Accuracy=0.6072, over 12768.00 frames. ], tot_loss[loss=3.504, ArTop10Accuracy=0.6124, over 11966.10 frames. ], batch size: 99, lr: 2.96e-02 +2024-08-06 03:47:33,831 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 03:48:38,676 INFO [trainer.py:765] (5/8) Epoch 2, batch 100, train_loss[loss=3.509, ArTop10Accuracy=0.6145, over 15383.00 frames. ], tot_loss[loss=3.462, ArTop10Accuracy=0.6208, over 4782.40 frames. 
], batch size: 62, lr: 2.90e-02 +2024-08-06 03:49:14,597 INFO [trainer.py:765] (5/8) Epoch 2, batch 200, train_loss[loss=3.493, ArTop10Accuracy=0.6161, over 13591.00 frames. ], tot_loss[loss=3.439, ArTop10Accuracy=0.6244, over 7778.19 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,519 INFO [trainer.py:765] (5/8) Epoch 2, batch 300, train_loss[loss=3.494, ArTop10Accuracy=0.6171, over 14088.00 frames. ], tot_loss[loss=3.421, ArTop10Accuracy=0.6282, over 9400.23 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:31,999 INFO [trainer.py:765] (5/8) Epoch 2, batch 400, train_loss[loss=3.269, ArTop10Accuracy=0.6539, over 10404.00 frames. ], tot_loss[loss=3.42, ArTop10Accuracy=0.6282, over 10326.14 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,109 INFO [trainer.py:765] (5/8) Epoch 2, batch 500, train_loss[loss=3.403, ArTop10Accuracy=0.6296, over 12285.00 frames. ], tot_loss[loss=3.405, ArTop10Accuracy=0.631, over 10906.93 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,202 INFO [trainer.py:765] (5/8) Epoch 2, batch 600, train_loss[loss=3.382, ArTop10Accuracy=0.6347, over 11612.00 frames. ], tot_loss[loss=3.403, ArTop10Accuracy=0.6314, over 11426.21 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,993 INFO [trainer.py:765] (5/8) Epoch 2, batch 700, train_loss[loss=3.322, ArTop10Accuracy=0.6408, over 10116.00 frames. ], tot_loss[loss=3.396, ArTop10Accuracy=0.633, over 11590.48 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,090 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (5/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33247MB +2024-08-06 03:52:56,541 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,881 INFO [trainer.py:765] (5/8) Epoch 2, batch 800, train_loss[loss=3.297, ArTop10Accuracy=0.6453, over 9991.00 frames. ], tot_loss[loss=3.384, ArTop10Accuracy=0.6354, over 11690.37 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (5/8) Epoch 2, batch 900, train_loss[loss=3.489, ArTop10Accuracy=0.6104, over 13175.00 frames. ], tot_loss[loss=3.371, ArTop10Accuracy=0.6379, over 11741.36 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,809 INFO [trainer.py:765] (5/8) Epoch 2, batch 1000, train_loss[loss=3.344, ArTop10Accuracy=0.6403, over 13207.00 frames. ], tot_loss[loss=3.37, ArTop10Accuracy=0.6383, over 11955.27 frames. ], batch size: 28, lr: 2.82e-02 +2024-08-06 03:54:56,006 INFO [trainer.py:765] (5/8) Epoch 2, batch 1100, train_loss[loss=3.354, ArTop10Accuracy=0.6401, over 13620.00 frames. ], tot_loss[loss=3.363, ArTop10Accuracy=0.6398, over 12005.23 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,229 INFO [trainer.py:765] (5/8) Epoch 2, batch 1200, train_loss[loss=3.358, ArTop10Accuracy=0.6407, over 12633.00 frames. ], tot_loss[loss=3.351, ArTop10Accuracy=0.6421, over 11951.95 frames. ], batch size: 99, lr: 2.80e-02 +2024-08-06 03:55:51,006 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 03:57:04,102 INFO [trainer.py:765] (5/8) Epoch 3, batch 100, train_loss[loss=3.372, ArTop10Accuracy=0.637, over 14379.00 frames. ], tot_loss[loss=3.305, ArTop10Accuracy=0.6514, over 4797.26 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,980 INFO [trainer.py:765] (5/8) Epoch 3, batch 200, train_loss[loss=3.295, ArTop10Accuracy=0.6549, over 13611.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6531, over 7794.13 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,074 INFO [trainer.py:765] (5/8) Epoch 3, batch 300, train_loss[loss=3.283, ArTop10Accuracy=0.6571, over 14285.00 frames. ], tot_loss[loss=3.277, ArTop10Accuracy=0.6565, over 9418.33 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,254 INFO [trainer.py:765] (5/8) Epoch 3, batch 400, train_loss[loss=3.251, ArTop10Accuracy=0.6608, over 10088.00 frames. ], tot_loss[loss=3.266, ArTop10Accuracy=0.659, over 10326.14 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,303 INFO [trainer.py:765] (5/8) Epoch 3, batch 500, train_loss[loss=3.14, ArTop10Accuracy=0.6808, over 12417.00 frames. ], tot_loss[loss=3.251, ArTop10Accuracy=0.6616, over 10905.55 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,095 INFO [trainer.py:765] (5/8) Epoch 3, batch 600, train_loss[loss=3.259, ArTop10Accuracy=0.654, over 11574.00 frames. ], tot_loss[loss=3.239, ArTop10Accuracy=0.6639, over 11441.83 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,059 INFO [trainer.py:765] (5/8) Epoch 3, batch 700, train_loss[loss=3.175, ArTop10Accuracy=0.6775, over 9982.00 frames. ], tot_loss[loss=3.231, ArTop10Accuracy=0.6655, over 11581.37 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,269 INFO [trainer.py:765] (5/8) Epoch 3, batch 800, train_loss[loss=3.022, ArTop10Accuracy=0.7074, over 10282.00 frames. ], tot_loss[loss=3.225, ArTop10Accuracy=0.6669, over 11692.55 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,740 INFO [trainer.py:765] (5/8) Epoch 3, batch 900, train_loss[loss=3.11, ArTop10Accuracy=0.6884, over 12949.00 frames. ], tot_loss[loss=3.209, ArTop10Accuracy=0.6702, over 11726.02 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,284 INFO [trainer.py:765] (5/8) Epoch 3, batch 1000, train_loss[loss=3.151, ArTop10Accuracy=0.6834, over 12768.00 frames. ], tot_loss[loss=3.195, ArTop10Accuracy=0.6728, over 11939.30 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (5/8) Epoch 3, batch 1100, train_loss[loss=3.21, ArTop10Accuracy=0.6757, over 13656.00 frames. ], tot_loss[loss=3.19, ArTop10Accuracy=0.6739, over 11995.70 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,313 INFO [trainer.py:765] (5/8) Epoch 3, batch 1200, train_loss[loss=3.201, ArTop10Accuracy=0.6668, over 11681.00 frames. ], tot_loss[loss=3.182, ArTop10Accuracy=0.6754, over 11931.92 frames. ], batch size: 99, lr: 2.54e-02 +2024-08-06 04:04:26,820 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:05:43,368 INFO [trainer.py:765] (5/8) Epoch 4, batch 100, train_loss[loss=3.204, ArTop10Accuracy=0.6698, over 14500.00 frames. ], tot_loss[loss=3.138, ArTop10Accuracy=0.6847, over 4766.81 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,077 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,405 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33247MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,826 INFO [trainer.py:765] (5/8) Epoch 4, batch 200, train_loss[loss=3.068, ArTop10Accuracy=0.7023, over 13915.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.6881, over 7780.21 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,545 INFO [trainer.py:765] (5/8) Epoch 4, batch 300, train_loss[loss=3.182, ArTop10Accuracy=0.6698, over 14364.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6888, over 9416.98 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (5/8) Epoch 4, batch 400, train_loss[loss=3.235, ArTop10Accuracy=0.6675, over 10712.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6896, over 10331.30 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,345 INFO [trainer.py:765] (5/8) Epoch 4, batch 500, train_loss[loss=3.071, ArTop10Accuracy=0.6985, over 12318.00 frames. ], tot_loss[loss=3.103, ArTop10Accuracy=0.6908, over 10905.47 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (5/8) Epoch 4, batch 600, train_loss[loss=2.998, ArTop10Accuracy=0.7101, over 11544.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6899, over 11446.33 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,502 INFO [trainer.py:765] (5/8) Epoch 4, batch 700, train_loss[loss=3.127, ArTop10Accuracy=0.6854, over 10712.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.6895, over 11597.86 frames. ], batch size: 13, lr: 2.31e-02 +2024-08-06 04:10:51,960 INFO [trainer.py:765] (5/8) Epoch 4, batch 800, train_loss[loss=3.118, ArTop10Accuracy=0.6844, over 10125.00 frames. ], tot_loss[loss=3.112, ArTop10Accuracy=0.6893, over 11711.35 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,331 INFO [trainer.py:765] (5/8) Epoch 4, batch 900, train_loss[loss=3.065, ArTop10Accuracy=0.6956, over 12823.00 frames. ], tot_loss[loss=3.101, ArTop10Accuracy=0.6914, over 11744.91 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (5/8) Epoch 4, batch 1000, train_loss[loss=3.113, ArTop10Accuracy=0.695, over 12969.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6902, over 11929.79 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (5/8) Epoch 4, batch 1100, train_loss[loss=3.165, ArTop10Accuracy=0.6787, over 13719.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6906, over 11981.14 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,544 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,828 INFO [trainer.py:765] (5/8) Epoch 4, batch 1200, train_loss[loss=3.153, ArTop10Accuracy=0.6798, over 13163.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.691, over 11930.69 frames. ], batch size: 99, lr: 2.25e-02 +2024-08-06 04:13:24,395 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (5/8) Epoch 5, batch 100, train_loss[loss=3.103, ArTop10Accuracy=0.6941, over 14277.00 frames. ], tot_loss[loss=3.071, ArTop10Accuracy=0.6987, over 4775.62 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (5/8) Epoch 5, batch 200, train_loss[loss=3.121, ArTop10Accuracy=0.6934, over 13238.00 frames. ], tot_loss[loss=3.062, ArTop10Accuracy=0.6999, over 7787.15 frames. ], batch size: 33, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (5/8) Epoch 5, batch 300, train_loss[loss=3.047, ArTop10Accuracy=0.6996, over 14418.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7016, over 9417.09 frames. ], batch size: 45, lr: 2.08e-02 +2024-08-06 04:16:53,133 INFO [trainer.py:765] (5/8) Epoch 5, batch 400, train_loss[loss=2.983, ArTop10Accuracy=0.718, over 10772.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7018, over 10324.21 frames. ], batch size: 15, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (5/8) Epoch 5, batch 500, train_loss[loss=3.097, ArTop10Accuracy=0.699, over 12047.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7012, over 10893.96 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (5/8) Epoch 5, batch 600, train_loss[loss=2.971, ArTop10Accuracy=0.7173, over 11561.00 frames. ], tot_loss[loss=3.049, ArTop10Accuracy=0.7017, over 11426.06 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,032 INFO [trainer.py:765] (5/8) Epoch 5, batch 700, train_loss[loss=2.911, ArTop10Accuracy=0.7209, over 10102.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6995, over 11566.59 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (5/8) Epoch 5, batch 800, train_loss[loss=3.042, ArTop10Accuracy=0.7093, over 9885.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6991, over 11673.70 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 04:20:27,475 INFO [trainer.py:811] (5/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33247MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (5/8) Epoch 5, batch 900, train_loss[loss=3.013, ArTop10Accuracy=0.7121, over 13101.00 frames. ], tot_loss[loss=3.055, ArTop10Accuracy=0.7004, over 11734.70 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,305 INFO [trainer.py:765] (5/8) Epoch 5, batch 1000, train_loss[loss=3.14, ArTop10Accuracy=0.6786, over 12754.00 frames. ], tot_loss[loss=3.062, ArTop10Accuracy=0.6993, over 11949.56 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,452 INFO [trainer.py:765] (5/8) Epoch 5, batch 1100, train_loss[loss=3.189, ArTop10Accuracy=0.6789, over 13715.00 frames. ], tot_loss[loss=3.066, ArTop10Accuracy=0.6984, over 11984.84 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (5/8) Epoch 5, batch 1200, train_loss[loss=3.185, ArTop10Accuracy=0.6753, over 11743.00 frames. ], tot_loss[loss=3.063, ArTop10Accuracy=0.6992, over 11930.58 frames. ], batch size: 97, lr: 1.99e-02 +2024-08-06 04:22:30,423 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (5/8) Epoch 6, batch 100, train_loss[loss=3.06, ArTop10Accuracy=0.7028, over 14806.00 frames. ], tot_loss[loss=3.029, ArTop10Accuracy=0.7069, over 4774.08 frames. 
], batch size: 61, lr: 1.85e-02 +2024-08-06 04:24:35,255 INFO [trainer.py:765] (5/8) Epoch 6, batch 200, train_loss[loss=3.022, ArTop10Accuracy=0.705, over 13776.00 frames. ], tot_loss[loss=3.015, ArTop10Accuracy=0.7093, over 7781.68 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,676 INFO [trainer.py:765] (5/8) Epoch 6, batch 300, train_loss[loss=2.984, ArTop10Accuracy=0.7149, over 14326.00 frames. ], tot_loss[loss=3.01, ArTop10Accuracy=0.7101, over 9419.02 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (5/8) Epoch 6, batch 400, train_loss[loss=2.891, ArTop10Accuracy=0.7361, over 10355.00 frames. ], tot_loss[loss=3.008, ArTop10Accuracy=0.7106, over 10336.18 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 04:26:51,485 INFO [trainer.py:765] (5/8) Epoch 6, batch 500, train_loss[loss=2.96, ArTop10Accuracy=0.7243, over 12225.00 frames. ], tot_loss[loss=3.01, ArTop10Accuracy=0.71, over 10905.80 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (5/8) Epoch 6, batch 600, train_loss[loss=3.017, ArTop10Accuracy=0.7087, over 11651.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.7098, over 11422.13 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,370 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,240 INFO [trainer.py:765] (5/8) Epoch 6, batch 700, train_loss[loss=3.031, ArTop10Accuracy=0.6912, over 10022.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7076, over 11562.47 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (5/8) Epoch 6, batch 800, train_loss[loss=2.88, ArTop10Accuracy=0.7336, over 9392.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.7067, over 11663.92 frames. ], batch size: 11, lr: 1.79e-02 +2024-08-06 04:29:42,752 INFO [trainer.py:765] (5/8) Epoch 6, batch 900, train_loss[loss=2.986, ArTop10Accuracy=0.7112, over 12830.00 frames. ], tot_loss[loss=3.017, ArTop10Accuracy=0.7085, over 11716.20 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (5/8) Epoch 6, batch 1000, train_loss[loss=2.923, ArTop10Accuracy=0.7397, over 12980.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7075, over 11927.22 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,384 INFO [trainer.py:765] (5/8) Epoch 6, batch 1100, train_loss[loss=3.095, ArTop10Accuracy=0.6964, over 13609.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7068, over 11981.75 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (5/8) Epoch 6, batch 1200, train_loss[loss=3.138, ArTop10Accuracy=0.6884, over 11948.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7065, over 11922.32 frames. ], batch size: 97, lr: 1.76e-02 +2024-08-06 04:31:40,725 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:32:52,405 INFO [trainer.py:765] (5/8) Epoch 7, batch 100, train_loss[loss=3.055, ArTop10Accuracy=0.704, over 14658.00 frames. ], tot_loss[loss=2.993, ArTop10Accuracy=0.7144, over 4807.38 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,224 INFO [trainer.py:765] (5/8) Epoch 7, batch 200, train_loss[loss=2.968, ArTop10Accuracy=0.7192, over 13532.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.7164, over 7806.01 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (5/8) Epoch 7, batch 300, train_loss[loss=3.067, ArTop10Accuracy=0.6953, over 14506.00 frames. ], tot_loss[loss=2.981, ArTop10Accuracy=0.7162, over 9431.21 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,847 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 04:34:45,808 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,809 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 04:34:46,125 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,146 INFO [trainer.py:765] (5/8) Epoch 7, batch 400, train_loss[loss=2.841, ArTop10Accuracy=0.7387, over 10467.00 frames. ], tot_loss[loss=2.978, ArTop10Accuracy=0.7164, over 10340.88 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (5/8) Epoch 7, batch 500, train_loss[loss=2.971, ArTop10Accuracy=0.715, over 12329.00 frames. ], tot_loss[loss=2.984, ArTop10Accuracy=0.7152, over 10907.27 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (5/8) Epoch 7, batch 600, train_loss[loss=2.926, ArTop10Accuracy=0.7292, over 11628.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.714, over 11413.22 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (5/8) Epoch 7, batch 700, train_loss[loss=3.087, ArTop10Accuracy=0.6988, over 10024.00 frames. ], tot_loss[loss=2.994, ArTop10Accuracy=0.7128, over 11560.62 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,613 INFO [trainer.py:765] (5/8) Epoch 7, batch 800, train_loss[loss=3.024, ArTop10Accuracy=0.7085, over 10156.00 frames. ], tot_loss[loss=3.002, ArTop10Accuracy=0.7113, over 11686.56 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (5/8) Epoch 7, batch 900, train_loss[loss=3.019, ArTop10Accuracy=0.7157, over 12818.00 frames. ], tot_loss[loss=2.995, ArTop10Accuracy=0.7129, over 11753.12 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,575 INFO [trainer.py:765] (5/8) Epoch 7, batch 1000, train_loss[loss=3.124, ArTop10Accuracy=0.6866, over 12760.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7125, over 11950.82 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,571 INFO [trainer.py:765] (5/8) Epoch 7, batch 1100, train_loss[loss=3.03, ArTop10Accuracy=0.705, over 13485.00 frames. ], tot_loss[loss=3.005, ArTop10Accuracy=0.7111, over 11978.12 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,990 INFO [trainer.py:765] (5/8) Epoch 7, batch 1200, train_loss[loss=3.173, ArTop10Accuracy=0.6773, over 12137.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7122, over 11932.50 frames. ], batch size: 97, lr: 1.57e-02 +2024-08-06 04:40:43,269 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,371 INFO [trainer.py:765] (5/8) Epoch 8, batch 100, train_loss[loss=2.956, ArTop10Accuracy=0.7218, over 14565.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7205, over 4781.31 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (5/8) Epoch 8, batch 200, train_loss[loss=2.915, ArTop10Accuracy=0.7326, over 13588.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7227, over 7779.99 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (5/8) Epoch 8, batch 300, train_loss[loss=3.011, ArTop10Accuracy=0.7113, over 14521.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.723, over 9412.24 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,461 INFO [trainer.py:765] (5/8) Epoch 8, batch 400, train_loss[loss=2.832, ArTop10Accuracy=0.7502, over 10926.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7225, over 10339.95 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 04:45:00,691 INFO [trainer.py:765] (5/8) Epoch 8, batch 500, train_loss[loss=2.895, ArTop10Accuracy=0.7333, over 12150.00 frames. ], tot_loss[loss=2.951, ArTop10Accuracy=0.7217, over 10895.71 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (5/8) Epoch 8, batch 600, train_loss[loss=3.039, ArTop10Accuracy=0.712, over 11707.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7197, over 11435.04 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,037 INFO [trainer.py:765] (5/8) Epoch 8, batch 700, train_loss[loss=2.897, ArTop10Accuracy=0.7229, over 10093.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7184, over 11595.78 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,208 INFO [trainer.py:765] (5/8) Epoch 8, batch 800, train_loss[loss=2.983, ArTop10Accuracy=0.7092, over 10196.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7176, over 11709.49 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,606 INFO [trainer.py:765] (5/8) Epoch 8, batch 900, train_loss[loss=2.917, ArTop10Accuracy=0.7312, over 12961.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7189, over 11748.63 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,033 INFO [trainer.py:765] (5/8) Epoch 8, batch 1000, train_loss[loss=2.901, ArTop10Accuracy=0.7342, over 12912.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7186, over 11953.22 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,828 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (5/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,932 INFO [trainer.py:765] (5/8) Epoch 8, batch 1100, train_loss[loss=2.94, ArTop10Accuracy=0.7232, over 13753.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.718, over 12000.96 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (5/8) Epoch 8, batch 1200, train_loss[loss=3.118, ArTop10Accuracy=0.6896, over 12209.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7178, over 11948.11 frames. ], batch size: 97, lr: 1.40e-02 +2024-08-06 04:49:48,652 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:51:01,548 INFO [trainer.py:765] (5/8) Epoch 9, batch 100, train_loss[loss=3.038, ArTop10Accuracy=0.7066, over 14432.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7233, over 4777.24 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,415 INFO [trainer.py:765] (5/8) Epoch 9, batch 200, train_loss[loss=3.004, ArTop10Accuracy=0.7104, over 13554.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7247, over 7781.26 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (5/8) Epoch 9, batch 300, train_loss[loss=2.969, ArTop10Accuracy=0.7192, over 13959.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7254, over 9430.97 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (5/8) Epoch 9, batch 400, train_loss[loss=2.894, ArTop10Accuracy=0.7299, over 10772.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7249, over 10347.47 frames. ], batch size: 15, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (5/8) Epoch 9, batch 500, train_loss[loss=3.015, ArTop10Accuracy=0.7047, over 12271.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7244, over 10911.02 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (5/8) Epoch 9, batch 600, train_loss[loss=2.794, ArTop10Accuracy=0.7403, over 11511.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7237, over 11427.19 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (5/8) Epoch 9, batch 700, train_loss[loss=2.959, ArTop10Accuracy=0.7114, over 10314.00 frames. ], tot_loss[loss=2.948, ArTop10Accuracy=0.7222, over 11569.37 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,575 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,598 INFO [trainer.py:765] (5/8) Epoch 9, batch 800, train_loss[loss=3.015, ArTop10Accuracy=0.7181, over 10057.00 frames. ], tot_loss[loss=2.95, ArTop10Accuracy=0.7216, over 11687.52 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (5/8) Epoch 9, batch 900, train_loss[loss=3.023, ArTop10Accuracy=0.7055, over 12909.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7225, over 11724.74 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,492 INFO [trainer.py:765] (5/8) Epoch 9, batch 1000, train_loss[loss=3.026, ArTop10Accuracy=0.7122, over 13092.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.722, over 11937.06 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,656 INFO [trainer.py:765] (5/8) Epoch 9, batch 1100, train_loss[loss=2.969, ArTop10Accuracy=0.7216, over 13605.00 frames. ], tot_loss[loss=2.956, ArTop10Accuracy=0.7207, over 11991.87 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,093 INFO [trainer.py:765] (5/8) Epoch 9, batch 1200, train_loss[loss=3.079, ArTop10Accuracy=0.6953, over 11790.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7196, over 11919.85 frames. ], batch size: 98, lr: 1.27e-02 +2024-08-06 04:58:43,444 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 04:59:52,749 INFO [trainer.py:765] (5/8) Epoch 10, batch 100, train_loss[loss=3.014, ArTop10Accuracy=0.713, over 14680.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.727, over 4772.51 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (5/8) Epoch 10, batch 200, train_loss[loss=2.92, ArTop10Accuracy=0.7368, over 13794.00 frames. ], tot_loss[loss=2.919, ArTop10Accuracy=0.7287, over 7789.41 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (5/8) Epoch 10, batch 300, train_loss[loss=3.037, ArTop10Accuracy=0.7069, over 14572.00 frames. ], tot_loss[loss=2.915, ArTop10Accuracy=0.7292, over 9410.37 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (5/8) Epoch 10, batch 400, train_loss[loss=2.877, ArTop10Accuracy=0.7329, over 10954.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7293, over 10344.37 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,488 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 05:02:55,377 INFO [trainer.py:811] (5/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 05:02:55,729 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (5/8) Epoch 10, batch 500, train_loss[loss=2.897, ArTop10Accuracy=0.7318, over 12237.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7293, over 10910.35 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (5/8) Epoch 10, batch 600, train_loss[loss=2.841, ArTop10Accuracy=0.7436, over 11691.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7277, over 11434.83 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,716 INFO [trainer.py:765] (5/8) Epoch 10, batch 700, train_loss[loss=2.945, ArTop10Accuracy=0.727, over 10092.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7264, over 11581.35 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (5/8) Epoch 10, batch 800, train_loss[loss=2.908, ArTop10Accuracy=0.7233, over 10079.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7252, over 11689.31 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (5/8) Epoch 10, batch 900, train_loss[loss=2.883, ArTop10Accuracy=0.7376, over 13043.00 frames. ], tot_loss[loss=2.926, ArTop10Accuracy=0.7264, over 11738.86 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,844 INFO [trainer.py:765] (5/8) Epoch 10, batch 1000, train_loss[loss=2.926, ArTop10Accuracy=0.7263, over 12952.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7246, over 11940.80 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (5/8) Epoch 10, batch 1100, train_loss[loss=3.042, ArTop10Accuracy=0.7047, over 13760.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7243, over 12002.23 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,483 INFO [trainer.py:765] (5/8) Epoch 10, batch 1200, train_loss[loss=3.121, ArTop10Accuracy=0.6849, over 12120.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7243, over 11958.72 frames. ], batch size: 97, lr: 1.16e-02 +2024-08-06 05:07:40,694 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:08:52,967 INFO [trainer.py:765] (5/8) Epoch 11, batch 100, train_loss[loss=2.937, ArTop10Accuracy=0.7192, over 14567.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7318, over 4793.45 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,277 INFO [trainer.py:765] (5/8) Epoch 11, batch 200, train_loss[loss=2.898, ArTop10Accuracy=0.7312, over 14043.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7327, over 7807.45 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,721 INFO [trainer.py:765] (5/8) Epoch 11, batch 300, train_loss[loss=2.97, ArTop10Accuracy=0.7165, over 14339.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.733, over 9424.25 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (5/8) Epoch 11, batch 400, train_loss[loss=2.766, ArTop10Accuracy=0.7584, over 10857.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.7318, over 10351.17 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (5/8) Epoch 11, batch 500, train_loss[loss=2.901, ArTop10Accuracy=0.7325, over 12308.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7328, over 10934.98 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,287 INFO [trainer.py:765] (5/8) Epoch 11, batch 600, train_loss[loss=2.842, ArTop10Accuracy=0.75, over 11632.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.7319, over 11450.89 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,708 INFO [trainer.py:765] (5/8) Epoch 11, batch 700, train_loss[loss=3.059, ArTop10Accuracy=0.7049, over 10090.00 frames. ], tot_loss[loss=2.907, ArTop10Accuracy=0.7304, over 11613.90 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (5/8) Epoch 11, batch 800, train_loss[loss=2.711, ArTop10Accuracy=0.7611, over 10006.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7287, over 11724.83 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,667 INFO [trainer.py:765] (5/8) Epoch 11, batch 900, train_loss[loss=2.932, ArTop10Accuracy=0.7206, over 13172.00 frames. ], tot_loss[loss=2.911, ArTop10Accuracy=0.7297, over 11748.89 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,263 INFO [trainer.py:765] (5/8) Epoch 11, batch 1000, train_loss[loss=2.941, ArTop10Accuracy=0.7212, over 12987.00 frames. ], tot_loss[loss=2.918, ArTop10Accuracy=0.728, over 11920.40 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,261 INFO [trainer.py:765] (5/8) Epoch 11, batch 1100, train_loss[loss=2.858, ArTop10Accuracy=0.7273, over 13739.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7271, over 11993.56 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,498 INFO [trainer.py:765] (5/8) Epoch 11, batch 1200, train_loss[loss=3.114, ArTop10Accuracy=0.6951, over 11855.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7272, over 11930.92 frames. ], batch size: 97, lr: 1.06e-02 +2024-08-06 05:16:12,698 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 05:16:21,622 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,523 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:18:03,006 INFO [trainer.py:765] (5/8) Epoch 12, batch 100, train_loss[loss=2.966, ArTop10Accuracy=0.7207, over 14796.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7338, over 4788.81 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,005 INFO [trainer.py:765] (5/8) Epoch 12, batch 200, train_loss[loss=2.89, ArTop10Accuracy=0.7343, over 13601.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7347, over 7816.76 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (5/8) Epoch 12, batch 300, train_loss[loss=2.991, ArTop10Accuracy=0.7133, over 14200.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7348, over 9430.75 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (5/8) Epoch 12, batch 400, train_loss[loss=2.786, ArTop10Accuracy=0.7553, over 10949.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7348, over 10368.71 frames. ], batch size: 15, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (5/8) Epoch 12, batch 500, train_loss[loss=2.895, ArTop10Accuracy=0.7366, over 12408.00 frames. ], tot_loss[loss=2.886, ArTop10Accuracy=0.7348, over 10915.69 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,916 INFO [trainer.py:765] (5/8) Epoch 12, batch 600, train_loss[loss=2.869, ArTop10Accuracy=0.7357, over 11756.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7342, over 11437.24 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,206 INFO [trainer.py:765] (5/8) Epoch 12, batch 700, train_loss[loss=2.898, ArTop10Accuracy=0.7291, over 9976.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7326, over 11586.51 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,911 INFO [trainer.py:765] (5/8) Epoch 12, batch 800, train_loss[loss=2.991, ArTop10Accuracy=0.7164, over 9276.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7314, over 11706.98 frames. ], batch size: 11, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (5/8) Epoch 12, batch 900, train_loss[loss=2.842, ArTop10Accuracy=0.75, over 12874.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7326, over 11760.21 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 05:23:54,577 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,346 INFO [trainer.py:765] (5/8) Epoch 12, batch 1000, train_loss[loss=2.907, ArTop10Accuracy=0.7301, over 13063.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7323, over 11944.31 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,501 INFO [trainer.py:765] (5/8) Epoch 12, batch 1100, train_loss[loss=2.942, ArTop10Accuracy=0.7275, over 14028.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7303, over 11991.42 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (5/8) Epoch 12, batch 1200, train_loss[loss=3.043, ArTop10Accuracy=0.7045, over 11848.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7302, over 11935.06 frames. ], batch size: 97, lr: 9.78e-03 +2024-08-06 05:25:41,190 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:26:46,787 INFO [trainer.py:765] (5/8) Epoch 13, batch 100, train_loss[loss=2.986, ArTop10Accuracy=0.713, over 14626.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7377, over 4775.81 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,553 INFO [trainer.py:765] (5/8) Epoch 13, batch 200, train_loss[loss=2.878, ArTop10Accuracy=0.7345, over 13751.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7379, over 7798.49 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (5/8) Epoch 13, batch 300, train_loss[loss=2.99, ArTop10Accuracy=0.7187, over 14290.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7397, over 9411.87 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (5/8) Epoch 13, batch 400, train_loss[loss=2.804, ArTop10Accuracy=0.7396, over 11075.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7381, over 10347.05 frames. ], batch size: 15, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (5/8) Epoch 13, batch 500, train_loss[loss=2.766, ArTop10Accuracy=0.7527, over 12172.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7376, over 10906.91 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,247 INFO [trainer.py:765] (5/8) Epoch 13, batch 600, train_loss[loss=2.868, ArTop10Accuracy=0.7391, over 11584.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7374, over 11413.24 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (5/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,055 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (5/8) Epoch 13, batch 700, train_loss[loss=2.845, ArTop10Accuracy=0.7507, over 10064.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7352, over 11570.16 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (5/8) Epoch 13, batch 800, train_loss[loss=2.783, ArTop10Accuracy=0.745, over 9973.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7345, over 11676.01 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,522 INFO [trainer.py:765] (5/8) Epoch 13, batch 900, train_loss[loss=2.924, ArTop10Accuracy=0.7292, over 12966.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.7349, over 11722.13 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (5/8) Epoch 13, batch 1000, train_loss[loss=2.894, ArTop10Accuracy=0.7336, over 12922.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7343, over 11908.18 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,232 INFO [trainer.py:765] (5/8) Epoch 13, batch 1100, train_loss[loss=2.901, ArTop10Accuracy=0.7322, over 13749.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7324, over 11996.25 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (5/8) Epoch 13, batch 1200, train_loss[loss=2.983, ArTop10Accuracy=0.7158, over 11719.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7324, over 11936.10 frames. ], batch size: 99, lr: 9.07e-03 +2024-08-06 05:34:29,787 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:35:39,198 INFO [trainer.py:765] (5/8) Epoch 14, batch 100, train_loss[loss=2.966, ArTop10Accuracy=0.7234, over 14614.00 frames. ], tot_loss[loss=2.866, ArTop10Accuracy=0.7396, over 4788.57 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,063 INFO [trainer.py:765] (5/8) Epoch 14, batch 200, train_loss[loss=2.882, ArTop10Accuracy=0.7387, over 13887.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7408, over 7776.78 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,311 INFO [trainer.py:765] (5/8) Epoch 14, batch 300, train_loss[loss=2.932, ArTop10Accuracy=0.7357, over 14494.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7416, over 9433.89 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,030 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,139 INFO [trainer.py:765] (5/8) Epoch 14, batch 400, train_loss[loss=2.878, ArTop10Accuracy=0.7394, over 11072.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7407, over 10346.32 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (5/8) Epoch 14, batch 500, train_loss[loss=2.941, ArTop10Accuracy=0.7272, over 12282.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7398, over 10900.23 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,374 INFO [trainer.py:765] (5/8) Epoch 14, batch 600, train_loss[loss=2.826, ArTop10Accuracy=0.7509, over 11567.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7392, over 11428.54 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,144 INFO [trainer.py:765] (5/8) Epoch 14, batch 700, train_loss[loss=2.749, ArTop10Accuracy=0.768, over 10135.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.7375, over 11572.30 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,136 INFO [trainer.py:765] (5/8) Epoch 14, batch 800, train_loss[loss=2.793, ArTop10Accuracy=0.7499, over 10077.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7368, over 11706.79 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,467 INFO [trainer.py:765] (5/8) Epoch 14, batch 900, train_loss[loss=2.872, ArTop10Accuracy=0.7384, over 13285.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.739, over 11752.36 frames. ], batch size: 28, lr: 8.52e-03 +2024-08-06 05:41:51,996 INFO [trainer.py:765] (5/8) Epoch 14, batch 1000, train_loss[loss=2.878, ArTop10Accuracy=0.7373, over 13032.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7385, over 11945.61 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,217 INFO [trainer.py:765] (5/8) Epoch 14, batch 1100, train_loss[loss=2.913, ArTop10Accuracy=0.7271, over 13720.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7362, over 11998.01 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,549 INFO [trainer.py:765] (5/8) Epoch 14, batch 1200, train_loss[loss=3.009, ArTop10Accuracy=0.7162, over 12218.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7356, over 11940.21 frames. ], batch size: 99, lr: 8.46e-03 +2024-08-06 05:43:18,804 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:44:28,571 INFO [trainer.py:765] (5/8) Epoch 15, batch 100, train_loss[loss=2.916, ArTop10Accuracy=0.7277, over 14505.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7427, over 4774.84 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 05:44:38,023 INFO [trainer.py:811] (5/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,185 INFO [trainer.py:765] (5/8) Epoch 15, batch 200, train_loss[loss=2.899, ArTop10Accuracy=0.7329, over 13933.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7431, over 7781.58 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (5/8) Epoch 15, batch 300, train_loss[loss=2.887, ArTop10Accuracy=0.7367, over 14309.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7435, over 9416.21 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (5/8) Epoch 15, batch 400, train_loss[loss=2.768, ArTop10Accuracy=0.7495, over 10263.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7428, over 10321.60 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (5/8) Epoch 15, batch 500, train_loss[loss=2.699, ArTop10Accuracy=0.77, over 12305.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.743, over 10893.49 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (5/8) Epoch 15, batch 600, train_loss[loss=2.845, ArTop10Accuracy=0.7478, over 11519.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7424, over 11419.70 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,854 INFO [trainer.py:765] (5/8) Epoch 15, batch 700, train_loss[loss=2.881, ArTop10Accuracy=0.7378, over 10212.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7416, over 11566.88 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,779 INFO [trainer.py:765] (5/8) Epoch 15, batch 800, train_loss[loss=2.616, ArTop10Accuracy=0.7703, over 9258.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7395, over 11686.86 frames. ], batch size: 11, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (5/8) Epoch 15, batch 900, train_loss[loss=2.815, ArTop10Accuracy=0.7488, over 12825.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.7409, over 11735.78 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (5/8) Epoch 15, batch 1000, train_loss[loss=2.881, ArTop10Accuracy=0.7417, over 12877.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7402, over 11943.33 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (5/8) Epoch 15, batch 1100, train_loss[loss=2.868, ArTop10Accuracy=0.7424, over 13599.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7392, over 11998.68 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (5/8) Epoch 15, batch 1200, train_loss[loss=3.011, ArTop10Accuracy=0.7153, over 11780.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7382, over 11953.49 frames. ], batch size: 99, lr: 7.91e-03 +2024-08-06 05:52:18,030 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (5/8) Epoch 16, batch 100, train_loss[loss=2.965, ArTop10Accuracy=0.7277, over 14761.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7467, over 4762.90 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,878 INFO [trainer.py:765] (5/8) Epoch 16, batch 200, train_loss[loss=2.873, ArTop10Accuracy=0.7397, over 13755.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7465, over 7785.70 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (5/8) Epoch 16, batch 300, train_loss[loss=2.916, ArTop10Accuracy=0.7231, over 14126.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7468, over 9405.54 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,931 INFO [trainer.py:765] (5/8) Epoch 16, batch 400, train_loss[loss=2.712, ArTop10Accuracy=0.7616, over 10183.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7464, over 10323.19 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (5/8) Epoch 16, batch 500, train_loss[loss=2.853, ArTop10Accuracy=0.7371, over 12258.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7453, over 10879.50 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,440 INFO [trainer.py:765] (5/8) Epoch 16, batch 600, train_loss[loss=2.766, ArTop10Accuracy=0.7673, over 11722.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7444, over 11435.82 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,041 INFO [trainer.py:765] (5/8) Epoch 16, batch 700, train_loss[loss=2.886, ArTop10Accuracy=0.7349, over 10114.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7415, over 11570.45 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (5/8) Epoch 16, batch 800, train_loss[loss=2.717, ArTop10Accuracy=0.7578, over 9291.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7406, over 11686.05 frames. ], batch size: 11, lr: 7.50e-03 +2024-08-06 05:58:41,569 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (5/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (5/8) Epoch 16, batch 900, train_loss[loss=2.851, ArTop10Accuracy=0.7429, over 12940.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7432, over 11758.97 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (5/8) Epoch 16, batch 1000, train_loss[loss=2.886, ArTop10Accuracy=0.7365, over 12968.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7431, over 11937.70 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,092 INFO [trainer.py:765] (5/8) Epoch 16, batch 1100, train_loss[loss=2.966, ArTop10Accuracy=0.7182, over 13702.00 frames. ], tot_loss[loss=2.858, ArTop10Accuracy=0.7403, over 11986.84 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,464 INFO [trainer.py:765] (5/8) Epoch 16, batch 1200, train_loss[loss=3.017, ArTop10Accuracy=0.7132, over 12539.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7394, over 11949.06 frames. ], batch size: 97, lr: 7.43e-03 +2024-08-06 06:01:13,070 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:02:27,261 INFO [trainer.py:765] (5/8) Epoch 17, batch 100, train_loss[loss=2.847, ArTop10Accuracy=0.7444, over 14808.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7476, over 4788.46 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,851 INFO [trainer.py:765] (5/8) Epoch 17, batch 200, train_loss[loss=2.861, ArTop10Accuracy=0.7373, over 13755.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7484, over 7809.53 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (5/8) Epoch 17, batch 300, train_loss[loss=2.831, ArTop10Accuracy=0.7449, over 14115.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7484, over 9430.76 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,838 INFO [trainer.py:765] (5/8) Epoch 17, batch 400, train_loss[loss=2.813, ArTop10Accuracy=0.7461, over 10334.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7475, over 10334.62 frames. ], batch size: 14, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (5/8) Epoch 17, batch 500, train_loss[loss=2.851, ArTop10Accuracy=0.739, over 12347.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7474, over 10907.53 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,551 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (5/8) Epoch 17, batch 600, train_loss[loss=2.903, ArTop10Accuracy=0.7349, over 11538.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7459, over 11417.79 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,694 INFO [trainer.py:765] (5/8) Epoch 17, batch 700, train_loss[loss=2.761, ArTop10Accuracy=0.7582, over 10257.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7437, over 11580.44 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (5/8) Epoch 17, batch 800, train_loss[loss=2.715, ArTop10Accuracy=0.7664, over 10337.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7439, over 11692.70 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,385 INFO [trainer.py:765] (5/8) Epoch 17, batch 900, train_loss[loss=2.8, ArTop10Accuracy=0.7541, over 12753.00 frames. ], tot_loss[loss=2.831, ArTop10Accuracy=0.7455, over 11736.58 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,995 INFO [trainer.py:765] (5/8) Epoch 17, batch 1000, train_loss[loss=2.847, ArTop10Accuracy=0.7361, over 12807.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7446, over 11931.11 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (5/8) Epoch 17, batch 1100, train_loss[loss=2.782, ArTop10Accuracy=0.759, over 13725.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7431, over 11983.41 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,445 INFO [trainer.py:765] (5/8) Epoch 17, batch 1200, train_loss[loss=2.994, ArTop10Accuracy=0.7166, over 12228.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7427, over 11924.42 frames. ], batch size: 99, lr: 7.01e-03 +2024-08-06 06:10:15,298 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:11:23,101 INFO [trainer.py:765] (5/8) Epoch 18, batch 100, train_loss[loss=2.892, ArTop10Accuracy=0.7381, over 14867.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7479, over 4783.92 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (5/8) Epoch 18, batch 200, train_loss[loss=2.75, ArTop10Accuracy=0.7603, over 13290.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7488, over 7795.58 frames. 
], batch size: 33, lr: 6.77e-03 +2024-08-06 06:12:40,317 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (5/8) Epoch 18, batch 300, train_loss[loss=2.946, ArTop10Accuracy=0.7246, over 14437.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7502, over 9417.70 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,098 INFO [trainer.py:765] (5/8) Epoch 18, batch 400, train_loss[loss=2.769, ArTop10Accuracy=0.7524, over 10271.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7503, over 10343.54 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (5/8) Epoch 18, batch 500, train_loss[loss=2.746, ArTop10Accuracy=0.7527, over 12279.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7508, over 10909.53 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (5/8) Epoch 18, batch 600, train_loss[loss=2.855, ArTop10Accuracy=0.7384, over 11684.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7486, over 11420.40 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,343 INFO [trainer.py:765] (5/8) Epoch 18, batch 700, train_loss[loss=2.693, ArTop10Accuracy=0.7776, over 10180.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7475, over 11547.38 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (5/8) Epoch 18, batch 800, train_loss[loss=2.714, ArTop10Accuracy=0.7765, over 10189.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7469, over 11680.86 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,913 INFO [trainer.py:765] (5/8) Epoch 18, batch 900, train_loss[loss=2.93, ArTop10Accuracy=0.726, over 12879.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7475, over 11738.31 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,528 INFO [trainer.py:765] (5/8) Epoch 18, batch 1000, train_loss[loss=2.694, ArTop10Accuracy=0.766, over 12936.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7462, over 11945.04 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,662 INFO [trainer.py:765] (5/8) Epoch 18, batch 1100, train_loss[loss=2.865, ArTop10Accuracy=0.7363, over 13745.00 frames. ], tot_loss[loss=2.839, ArTop10Accuracy=0.7443, over 12002.10 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (5/8) Epoch 18, batch 1200, train_loss[loss=2.938, ArTop10Accuracy=0.726, over 12394.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7447, over 11951.43 frames. ], batch size: 98, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,779 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:20:29,728 INFO [trainer.py:765] (5/8) Epoch 19, batch 100, train_loss[loss=2.899, ArTop10Accuracy=0.7371, over 14803.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7505, over 4775.27 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 06:21:11,275 INFO [trainer.py:765] (5/8) Epoch 19, batch 200, train_loss[loss=2.666, ArTop10Accuracy=0.775, over 14085.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7527, over 7770.99 frames. ], batch size: 35, lr: 6.41e-03 +2024-08-06 06:21:56,078 INFO [trainer.py:765] (5/8) Epoch 19, batch 300, train_loss[loss=2.818, ArTop10Accuracy=0.7519, over 14261.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7537, over 9409.99 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (5/8) Epoch 19, batch 400, train_loss[loss=2.827, ArTop10Accuracy=0.7469, over 11058.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7524, over 10316.35 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 06:23:18,998 INFO [trainer.py:765] (5/8) Epoch 19, batch 500, train_loss[loss=2.733, ArTop10Accuracy=0.7689, over 12300.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.752, over 10877.78 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,686 INFO [trainer.py:765] (5/8) Epoch 19, batch 600, train_loss[loss=2.779, ArTop10Accuracy=0.7626, over 11511.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.7508, over 11421.42 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,187 INFO [trainer.py:765] (5/8) Epoch 19, batch 700, train_loss[loss=2.66, ArTop10Accuracy=0.781, over 10008.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7494, over 11564.74 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,355 INFO [trainer.py:765] (5/8) Epoch 19, batch 800, train_loss[loss=2.887, ArTop10Accuracy=0.7291, over 10049.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7491, over 11678.24 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,625 INFO [trainer.py:765] (5/8) Epoch 19, batch 900, train_loss[loss=2.631, ArTop10Accuracy=0.7877, over 12965.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7507, over 11713.42 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,774 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (5/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 33249MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,031 INFO [trainer.py:765] (5/8) Epoch 19, batch 1000, train_loss[loss=2.847, ArTop10Accuracy=0.7399, over 13042.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7484, over 11905.17 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (5/8) Epoch 19, batch 1100, train_loss[loss=2.85, ArTop10Accuracy=0.7451, over 13800.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7463, over 11968.02 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,454 INFO [trainer.py:765] (5/8) Epoch 19, batch 1200, train_loss[loss=2.985, ArTop10Accuracy=0.7162, over 11824.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7468, over 11905.60 frames. ], batch size: 97, lr: 6.28e-03 +2024-08-06 06:28:00,610 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:29:08,984 INFO [trainer.py:765] (5/8) Epoch 20, batch 100, train_loss[loss=2.817, ArTop10Accuracy=0.7474, over 14655.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7536, over 4791.92 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (5/8) Epoch 20, batch 200, train_loss[loss=2.79, ArTop10Accuracy=0.7503, over 13650.00 frames. ], tot_loss[loss=2.793, ArTop10Accuracy=0.7543, over 7793.65 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,105 INFO [trainer.py:765] (5/8) Epoch 20, batch 300, train_loss[loss=2.833, ArTop10Accuracy=0.7478, over 14093.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7547, over 9417.07 frames. ], batch size: 43, lr: 6.08e-03 +2024-08-06 06:31:16,354 INFO [trainer.py:765] (5/8) Epoch 20, batch 400, train_loss[loss=2.782, ArTop10Accuracy=0.7602, over 10860.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7543, over 10322.85 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (5/8) Epoch 20, batch 500, train_loss[loss=2.847, ArTop10Accuracy=0.7415, over 12177.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7533, over 10899.29 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,357 INFO [trainer.py:765] (5/8) Epoch 20, batch 600, train_loss[loss=2.709, ArTop10Accuracy=0.7684, over 11645.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.7528, over 11421.14 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,752 INFO [trainer.py:765] (5/8) Epoch 20, batch 700, train_loss[loss=2.792, ArTop10Accuracy=0.7455, over 9336.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7517, over 11568.97 frames. ], batch size: 11, lr: 6.03e-03 +2024-08-06 06:33:43,830 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (5/8) Epoch 20, batch 800, train_loss[loss=2.812, ArTop10Accuracy=0.7496, over 10384.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7508, over 11675.68 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (5/8) Epoch 20, batch 900, train_loss[loss=2.887, ArTop10Accuracy=0.7393, over 12969.00 frames. ], tot_loss[loss=2.803, ArTop10Accuracy=0.7513, over 11732.17 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (5/8) Epoch 20, batch 1000, train_loss[loss=2.768, ArTop10Accuracy=0.7536, over 13008.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7502, over 11937.31 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (5/8) Epoch 20, batch 1100, train_loss[loss=2.785, ArTop10Accuracy=0.7584, over 13661.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7484, over 12013.73 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (5/8) Epoch 20, batch 1200, train_loss[loss=2.982, ArTop10Accuracy=0.7139, over 11908.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7477, over 11961.51 frames. ], batch size: 97, lr: 5.97e-03 +2024-08-06 06:36:42,607 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:36:42,610 INFO [trainer.py:1069] (5/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-6 b/libritts/log/log-train-2024-08-06-03-39-40-6 new file mode 100644 index 0000000000000000000000000000000000000000..9ca6d157049a4acc6a63b4b1155b60621dd580fd --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-6 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,362 INFO [trainer.py:870] (6/8) Training started +2024-08-06 03:39:40,362 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 03:39:40,363 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,363 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 03:39:41,125 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,878 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 03:39:43,994 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 03:39:43,996 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 03:39:43,997 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 03:39:43,997 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 03:39:43,998 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,605 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 03:39:44,605 INFO [datamodule.py:367] (6/8) About to create dev dataset +2024-08-06 03:39:44,931 INFO [datamodule.py:388] (6/8) About to create dev 
dataloader +2024-08-06 03:40:39,570 INFO [trainer.py:765] (6/8) Epoch 1, batch 100, train_loss[loss=4.161, ArTop10Accuracy=0.508, over 14269.00 frames. ], tot_loss[loss=4.792, ArTop10Accuracy=0.3944, over 4791.41 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,921 INFO [trainer.py:765] (6/8) Epoch 1, batch 200, train_loss[loss=4.011, ArTop10Accuracy=0.5187, over 13608.00 frames. ], tot_loss[loss=4.307, ArTop10Accuracy=0.475, over 7789.12 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,950 INFO [trainer.py:765] (6/8) Epoch 1, batch 300, train_loss[loss=3.827, ArTop10Accuracy=0.547, over 14420.00 frames. ], tot_loss[loss=4.091, ArTop10Accuracy=0.51, over 9420.38 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,080 INFO [trainer.py:765] (6/8) Epoch 1, batch 400, train_loss[loss=3.637, ArTop10Accuracy=0.59, over 11047.00 frames. ], tot_loss[loss=3.944, ArTop10Accuracy=0.5343, over 10334.93 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (6/8) Epoch 1, batch 500, train_loss[loss=3.544, ArTop10Accuracy=0.6094, over 12284.00 frames. ], tot_loss[loss=3.832, ArTop10Accuracy=0.5528, over 10891.31 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,592 INFO [trainer.py:765] (6/8) Epoch 1, batch 600, train_loss[loss=3.657, ArTop10Accuracy=0.5837, over 11494.00 frames. ], tot_loss[loss=3.75, ArTop10Accuracy=0.5672, over 11435.24 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,898 INFO [trainer.py:765] (6/8) Epoch 1, batch 700, train_loss[loss=3.501, ArTop10Accuracy=0.6087, over 10358.00 frames. ], tot_loss[loss=3.688, ArTop10Accuracy=0.578, over 11589.54 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,513 INFO [trainer.py:765] (6/8) Epoch 1, batch 800, train_loss[loss=3.477, ArTop10Accuracy=0.6212, over 10064.00 frames. ], tot_loss[loss=3.639, ArTop10Accuracy=0.587, over 11706.16 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,556 INFO [trainer.py:765] (6/8) Epoch 1, batch 900, train_loss[loss=3.63, ArTop10Accuracy=0.5772, over 12914.00 frames. ], tot_loss[loss=3.586, ArTop10Accuracy=0.5967, over 11741.21 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,648 INFO [trainer.py:765] (6/8) Epoch 1, batch 1000, train_loss[loss=3.374, ArTop10Accuracy=0.647, over 13027.00 frames. ], tot_loss[loss=3.556, ArTop10Accuracy=0.6025, over 11949.50 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,611 INFO [trainer.py:765] (6/8) Epoch 1, batch 1100, train_loss[loss=3.547, ArTop10Accuracy=0.6059, over 13974.00 frames. ], tot_loss[loss=3.526, ArTop10Accuracy=0.6082, over 12021.20 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,744 INFO [trainer.py:765] (6/8) Epoch 1, batch 1200, train_loss[loss=3.556, ArTop10Accuracy=0.6074, over 12323.00 frames. ], tot_loss[loss=3.503, ArTop10Accuracy=0.6128, over 11936.64 frames. ], batch size: 98, lr: 2.96e-02 +2024-08-06 03:47:33,827 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 03:48:38,677 INFO [trainer.py:765] (6/8) Epoch 2, batch 100, train_loss[loss=3.494, ArTop10Accuracy=0.6103, over 14786.00 frames. ], tot_loss[loss=3.449, ArTop10Accuracy=0.6226, over 4771.63 frames. 
], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,597 INFO [trainer.py:765] (6/8) Epoch 2, batch 200, train_loss[loss=3.466, ArTop10Accuracy=0.625, over 13520.00 frames. ], tot_loss[loss=3.431, ArTop10Accuracy=0.6263, over 7771.61 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,520 INFO [trainer.py:765] (6/8) Epoch 2, batch 300, train_loss[loss=3.43, ArTop10Accuracy=0.6274, over 14468.00 frames. ], tot_loss[loss=3.419, ArTop10Accuracy=0.6283, over 9423.26 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:32,000 INFO [trainer.py:765] (6/8) Epoch 2, batch 400, train_loss[loss=3.37, ArTop10Accuracy=0.6389, over 11171.00 frames. ], tot_loss[loss=3.409, ArTop10Accuracy=0.6304, over 10351.34 frames. ], batch size: 15, lr: 2.88e-02 +2024-08-06 03:51:17,110 INFO [trainer.py:765] (6/8) Epoch 2, batch 500, train_loss[loss=3.36, ArTop10Accuracy=0.6437, over 12325.00 frames. ], tot_loss[loss=3.398, ArTop10Accuracy=0.6325, over 10919.90 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,204 INFO [trainer.py:765] (6/8) Epoch 2, batch 600, train_loss[loss=3.234, ArTop10Accuracy=0.6485, over 11584.00 frames. ], tot_loss[loss=3.397, ArTop10Accuracy=0.6327, over 11448.35 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,994 INFO [trainer.py:765] (6/8) Epoch 2, batch 700, train_loss[loss=3.368, ArTop10Accuracy=0.6372, over 9851.00 frames. ], tot_loss[loss=3.395, ArTop10Accuracy=0.6332, over 11571.32 frames. ], batch size: 12, lr: 2.85e-02 +2024-08-06 03:52:47,092 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (6/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32951MB +2024-08-06 03:52:56,541 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,881 INFO [trainer.py:765] (6/8) Epoch 2, batch 800, train_loss[loss=3.245, ArTop10Accuracy=0.6494, over 9901.00 frames. ], tot_loss[loss=3.393, ArTop10Accuracy=0.6334, over 11684.77 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,299 INFO [trainer.py:765] (6/8) Epoch 2, batch 900, train_loss[loss=3.319, ArTop10Accuracy=0.6515, over 13396.00 frames. ], tot_loss[loss=3.379, ArTop10Accuracy=0.6359, over 11755.65 frames. ], batch size: 28, lr: 2.83e-02 +2024-08-06 03:54:24,808 INFO [trainer.py:765] (6/8) Epoch 2, batch 1000, train_loss[loss=3.361, ArTop10Accuracy=0.6335, over 12882.00 frames. ], tot_loss[loss=3.371, ArTop10Accuracy=0.6377, over 11964.45 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,006 INFO [trainer.py:765] (6/8) Epoch 2, batch 1100, train_loss[loss=3.303, ArTop10Accuracy=0.6547, over 13844.00 frames. ], tot_loss[loss=3.368, ArTop10Accuracy=0.6383, over 11999.69 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,228 INFO [trainer.py:765] (6/8) Epoch 2, batch 1200, train_loss[loss=3.375, ArTop10Accuracy=0.6409, over 11179.00 frames. ], tot_loss[loss=3.354, ArTop10Accuracy=0.6413, over 11934.15 frames. ], batch size: 98, lr: 2.80e-02 +2024-08-06 03:55:51,263 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 03:57:04,103 INFO [trainer.py:765] (6/8) Epoch 3, batch 100, train_loss[loss=3.332, ArTop10Accuracy=0.6457, over 14606.00 frames. ], tot_loss[loss=3.316, ArTop10Accuracy=0.649, over 4778.34 frames. 
], batch size: 61, lr: 2.67e-02 +2024-08-06 03:57:50,981 INFO [trainer.py:765] (6/8) Epoch 3, batch 200, train_loss[loss=3.288, ArTop10Accuracy=0.6519, over 13832.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6531, over 7778.02 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,075 INFO [trainer.py:765] (6/8) Epoch 3, batch 300, train_loss[loss=3.3, ArTop10Accuracy=0.6499, over 14213.00 frames. ], tot_loss[loss=3.278, ArTop10Accuracy=0.6565, over 9397.70 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,255 INFO [trainer.py:765] (6/8) Epoch 3, batch 400, train_loss[loss=3.156, ArTop10Accuracy=0.6775, over 10349.00 frames. ], tot_loss[loss=3.262, ArTop10Accuracy=0.6597, over 10310.87 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,304 INFO [trainer.py:765] (6/8) Epoch 3, batch 500, train_loss[loss=3.1, ArTop10Accuracy=0.6894, over 12313.00 frames. ], tot_loss[loss=3.254, ArTop10Accuracy=0.6611, over 10907.59 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,096 INFO [trainer.py:765] (6/8) Epoch 3, batch 600, train_loss[loss=3.216, ArTop10Accuracy=0.6666, over 11816.00 frames. ], tot_loss[loss=3.241, ArTop10Accuracy=0.6635, over 11433.99 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,060 INFO [trainer.py:765] (6/8) Epoch 3, batch 700, train_loss[loss=3.193, ArTop10Accuracy=0.6722, over 9177.00 frames. ], tot_loss[loss=3.239, ArTop10Accuracy=0.664, over 11546.32 frames. ], batch size: 11, lr: 2.60e-02 +2024-08-06 04:01:56,270 INFO [trainer.py:765] (6/8) Epoch 3, batch 800, train_loss[loss=2.847, ArTop10Accuracy=0.7352, over 10189.00 frames. ], tot_loss[loss=3.229, ArTop10Accuracy=0.6661, over 11674.31 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,741 INFO [trainer.py:765] (6/8) Epoch 3, batch 900, train_loss[loss=3.323, ArTop10Accuracy=0.6508, over 12935.00 frames. ], tot_loss[loss=3.215, ArTop10Accuracy=0.6692, over 11737.70 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,284 INFO [trainer.py:765] (6/8) Epoch 3, batch 1000, train_loss[loss=3.191, ArTop10Accuracy=0.6816, over 12970.00 frames. ], tot_loss[loss=3.203, ArTop10Accuracy=0.6716, over 11926.31 frames. ], batch size: 27, lr: 2.56e-02 +2024-08-06 04:03:30,943 INFO [trainer.py:765] (6/8) Epoch 3, batch 1100, train_loss[loss=3.12, ArTop10Accuracy=0.6882, over 13801.00 frames. ], tot_loss[loss=3.193, ArTop10Accuracy=0.6731, over 11986.47 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,313 INFO [trainer.py:765] (6/8) Epoch 3, batch 1200, train_loss[loss=3.199, ArTop10Accuracy=0.6727, over 12437.00 frames. ], tot_loss[loss=3.186, ArTop10Accuracy=0.6746, over 11927.32 frames. ], batch size: 98, lr: 2.54e-02 +2024-08-06 04:04:26,834 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:05:43,368 INFO [trainer.py:765] (6/8) Epoch 4, batch 100, train_loss[loss=3.243, ArTop10Accuracy=0.6617, over 14624.00 frames. ], tot_loss[loss=3.131, ArTop10Accuracy=0.686, over 4773.64 frames. ], batch size: 62, lr: 2.38e-02 +2024-08-06 04:06:07,076 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,404 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32951MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,825 INFO [trainer.py:765] (6/8) Epoch 4, batch 200, train_loss[loss=3.136, ArTop10Accuracy=0.6865, over 13964.00 frames. ], tot_loss[loss=3.126, ArTop10Accuracy=0.6873, over 7776.38 frames. ], batch size: 34, lr: 2.37e-02 +2024-08-06 04:07:18,544 INFO [trainer.py:765] (6/8) Epoch 4, batch 300, train_loss[loss=3.212, ArTop10Accuracy=0.6659, over 14204.00 frames. ], tot_loss[loss=3.116, ArTop10Accuracy=0.6893, over 9400.68 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,910 INFO [trainer.py:765] (6/8) Epoch 4, batch 400, train_loss[loss=2.92, ArTop10Accuracy=0.7249, over 10399.00 frames. ], tot_loss[loss=3.11, ArTop10Accuracy=0.6899, over 10305.42 frames. ], batch size: 14, lr: 2.34e-02 +2024-08-06 04:08:45,344 INFO [trainer.py:765] (6/8) Epoch 4, batch 500, train_loss[loss=3.072, ArTop10Accuracy=0.7006, over 12154.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6907, over 10887.77 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,071 INFO [trainer.py:765] (6/8) Epoch 4, batch 600, train_loss[loss=3.138, ArTop10Accuracy=0.691, over 11645.00 frames. ], tot_loss[loss=3.106, ArTop10Accuracy=0.6899, over 11423.12 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,501 INFO [trainer.py:765] (6/8) Epoch 4, batch 700, train_loss[loss=2.83, ArTop10Accuracy=0.7375, over 10410.00 frames. ], tot_loss[loss=3.107, ArTop10Accuracy=0.6898, over 11561.29 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,959 INFO [trainer.py:765] (6/8) Epoch 4, batch 800, train_loss[loss=3.099, ArTop10Accuracy=0.6947, over 10081.00 frames. ], tot_loss[loss=3.114, ArTop10Accuracy=0.6887, over 11674.78 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,330 INFO [trainer.py:765] (6/8) Epoch 4, batch 900, train_loss[loss=3.152, ArTop10Accuracy=0.6821, over 13001.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6896, over 11730.08 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,826 INFO [trainer.py:765] (6/8) Epoch 4, batch 1000, train_loss[loss=3.056, ArTop10Accuracy=0.6984, over 13017.00 frames. ], tot_loss[loss=3.101, ArTop10Accuracy=0.691, over 11944.87 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,960 INFO [trainer.py:765] (6/8) Epoch 4, batch 1100, train_loss[loss=3.071, ArTop10Accuracy=0.6959, over 13743.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6905, over 11989.95 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,544 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,827 INFO [trainer.py:765] (6/8) Epoch 4, batch 1200, train_loss[loss=3.174, ArTop10Accuracy=0.677, over 12469.00 frames. ], tot_loss[loss=3.1, ArTop10Accuracy=0.6913, over 11928.23 frames. ], batch size: 99, lr: 2.25e-02 +2024-08-06 04:13:24,132 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (6/8) Epoch 5, batch 100, train_loss[loss=3.083, ArTop10Accuracy=0.6935, over 14692.00 frames. ], tot_loss[loss=3.064, ArTop10Accuracy=0.7, over 4776.75 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (6/8) Epoch 5, batch 200, train_loss[loss=3.157, ArTop10Accuracy=0.6749, over 13699.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7011, over 7792.73 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,010 INFO [trainer.py:765] (6/8) Epoch 5, batch 300, train_loss[loss=3.049, ArTop10Accuracy=0.7064, over 14372.00 frames. ], tot_loss[loss=3.052, ArTop10Accuracy=0.7017, over 9429.46 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,133 INFO [trainer.py:765] (6/8) Epoch 5, batch 400, train_loss[loss=3.02, ArTop10Accuracy=0.7063, over 10964.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7011, over 10351.09 frames. ], batch size: 15, lr: 2.07e-02 +2024-08-06 04:17:36,637 INFO [trainer.py:765] (6/8) Epoch 5, batch 500, train_loss[loss=3.004, ArTop10Accuracy=0.7123, over 12358.00 frames. ], tot_loss[loss=3.048, ArTop10Accuracy=0.7022, over 10915.32 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,113 INFO [trainer.py:765] (6/8) Epoch 5, batch 600, train_loss[loss=3.066, ArTop10Accuracy=0.6961, over 11418.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7014, over 11431.83 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,032 INFO [trainer.py:765] (6/8) Epoch 5, batch 700, train_loss[loss=2.864, ArTop10Accuracy=0.7372, over 10174.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7005, over 11582.06 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,066 INFO [trainer.py:765] (6/8) Epoch 5, batch 800, train_loss[loss=2.994, ArTop10Accuracy=0.7222, over 10009.00 frames. ], tot_loss[loss=3.063, ArTop10Accuracy=0.699, over 11696.35 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,214 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 04:20:27,476 INFO [trainer.py:811] (6/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32951MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (6/8) Epoch 5, batch 900, train_loss[loss=3.004, ArTop10Accuracy=0.7097, over 13064.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7005, over 11736.27 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (6/8) Epoch 5, batch 1000, train_loss[loss=3.052, ArTop10Accuracy=0.6998, over 13018.00 frames. ], tot_loss[loss=3.055, ArTop10Accuracy=0.7005, over 11943.12 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,451 INFO [trainer.py:765] (6/8) Epoch 5, batch 1100, train_loss[loss=3.152, ArTop10Accuracy=0.6854, over 13724.00 frames. ], tot_loss[loss=3.066, ArTop10Accuracy=0.6986, over 12000.47 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (6/8) Epoch 5, batch 1200, train_loss[loss=3.19, ArTop10Accuracy=0.6735, over 13139.00 frames. ], tot_loss[loss=3.055, ArTop10Accuracy=0.7003, over 11935.00 frames. ], batch size: 99, lr: 1.99e-02 +2024-08-06 04:22:30,545 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:23:46,281 INFO [trainer.py:765] (6/8) Epoch 6, batch 100, train_loss[loss=3.005, ArTop10Accuracy=0.7128, over 14625.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7073, over 4771.22 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 04:24:35,254 INFO [trainer.py:765] (6/8) Epoch 6, batch 200, train_loss[loss=2.976, ArTop10Accuracy=0.7161, over 13812.00 frames. ], tot_loss[loss=3.014, ArTop10Accuracy=0.7097, over 7788.77 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,675 INFO [trainer.py:765] (6/8) Epoch 6, batch 300, train_loss[loss=3.076, ArTop10Accuracy=0.6953, over 13977.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7102, over 9418.50 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,923 INFO [trainer.py:765] (6/8) Epoch 6, batch 400, train_loss[loss=3.117, ArTop10Accuracy=0.6891, over 10962.00 frames. ], tot_loss[loss=3.016, ArTop10Accuracy=0.709, over 10323.34 frames. ], batch size: 15, lr: 1.83e-02 +2024-08-06 04:26:51,484 INFO [trainer.py:765] (6/8) Epoch 6, batch 500, train_loss[loss=3.071, ArTop10Accuracy=0.6958, over 12488.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.7098, over 10900.75 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,296 INFO [trainer.py:765] (6/8) Epoch 6, batch 600, train_loss[loss=3.05, ArTop10Accuracy=0.7108, over 11604.00 frames. ], tot_loss[loss=3.017, ArTop10Accuracy=0.7082, over 11432.62 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,369 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,239 INFO [trainer.py:765] (6/8) Epoch 6, batch 700, train_loss[loss=2.878, ArTop10Accuracy=0.7286, over 10137.00 frames. ], tot_loss[loss=3.026, ArTop10Accuracy=0.7064, over 11573.57 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,215 INFO [trainer.py:765] (6/8) Epoch 6, batch 800, train_loss[loss=3.014, ArTop10Accuracy=0.7099, over 10201.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.706, over 11701.91 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,751 INFO [trainer.py:765] (6/8) Epoch 6, batch 900, train_loss[loss=2.978, ArTop10Accuracy=0.7177, over 12907.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7071, over 11750.61 frames. ], batch size: 27, lr: 1.78e-02 +2024-08-06 04:30:14,304 INFO [trainer.py:765] (6/8) Epoch 6, batch 1000, train_loss[loss=3.08, ArTop10Accuracy=0.7022, over 13005.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7072, over 11953.69 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,382 INFO [trainer.py:765] (6/8) Epoch 6, batch 1100, train_loss[loss=3.068, ArTop10Accuracy=0.702, over 13731.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.7062, over 11999.00 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (6/8) Epoch 6, batch 1200, train_loss[loss=3.152, ArTop10Accuracy=0.6838, over 11840.00 frames. ], tot_loss[loss=3.027, ArTop10Accuracy=0.7065, over 11949.76 frames. ], batch size: 99, lr: 1.76e-02 +2024-08-06 04:31:40,556 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:32:52,404 INFO [trainer.py:765] (6/8) Epoch 7, batch 100, train_loss[loss=2.983, ArTop10Accuracy=0.7144, over 14621.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7142, over 4768.91 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,223 INFO [trainer.py:765] (6/8) Epoch 7, batch 200, train_loss[loss=3.015, ArTop10Accuracy=0.7045, over 13644.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7146, over 7767.84 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,608 INFO [trainer.py:765] (6/8) Epoch 7, batch 300, train_loss[loss=3.139, ArTop10Accuracy=0.6837, over 14366.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7154, over 9402.21 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,847 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 04:34:45,808 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,809 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 32951MB +2024-08-06 04:34:46,124 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,146 INFO [trainer.py:765] (6/8) Epoch 7, batch 400, train_loss[loss=3.094, ArTop10Accuracy=0.6922, over 10366.00 frames. ], tot_loss[loss=2.984, ArTop10Accuracy=0.7158, over 10306.87 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 04:36:01,710 INFO [trainer.py:765] (6/8) Epoch 7, batch 500, train_loss[loss=3.055, ArTop10Accuracy=0.6986, over 12227.00 frames. ], tot_loss[loss=2.982, ArTop10Accuracy=0.7158, over 10896.64 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (6/8) Epoch 7, batch 600, train_loss[loss=2.879, ArTop10Accuracy=0.7363, over 11473.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7142, over 11419.30 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,799 INFO [trainer.py:765] (6/8) Epoch 7, batch 700, train_loss[loss=2.809, ArTop10Accuracy=0.7458, over 10089.00 frames. ], tot_loss[loss=2.991, ArTop10Accuracy=0.7133, over 11561.95 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,613 INFO [trainer.py:765] (6/8) Epoch 7, batch 800, train_loss[loss=3.022, ArTop10Accuracy=0.7025, over 10124.00 frames. ], tot_loss[loss=3, ArTop10Accuracy=0.7116, over 11674.07 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,110 INFO [trainer.py:765] (6/8) Epoch 7, batch 900, train_loss[loss=3.033, ArTop10Accuracy=0.7055, over 12956.00 frames. ], tot_loss[loss=2.992, ArTop10Accuracy=0.7133, over 11722.13 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,574 INFO [trainer.py:765] (6/8) Epoch 7, batch 1000, train_loss[loss=2.804, ArTop10Accuracy=0.7444, over 12987.00 frames. ], tot_loss[loss=2.992, ArTop10Accuracy=0.7134, over 11943.17 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,570 INFO [trainer.py:765] (6/8) Epoch 7, batch 1100, train_loss[loss=2.989, ArTop10Accuracy=0.7157, over 14209.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7125, over 12000.89 frames. ], batch size: 35, lr: 1.57e-02 +2024-08-06 04:40:17,989 INFO [trainer.py:765] (6/8) Epoch 7, batch 1200, train_loss[loss=3.246, ArTop10Accuracy=0.6652, over 12797.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7126, over 11949.83 frames. ], batch size: 97, lr: 1.57e-02 +2024-08-06 04:40:43,223 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:41:37,491 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,370 INFO [trainer.py:765] (6/8) Epoch 8, batch 100, train_loss[loss=3.072, ArTop10Accuracy=0.6982, over 14822.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7192, over 4791.35 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,985 INFO [trainer.py:765] (6/8) Epoch 8, batch 200, train_loss[loss=2.948, ArTop10Accuracy=0.7245, over 13851.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.72, over 7800.24 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,044 INFO [trainer.py:765] (6/8) Epoch 8, batch 300, train_loss[loss=3.094, ArTop10Accuracy=0.6992, over 14146.00 frames. ], tot_loss[loss=2.961, ArTop10Accuracy=0.7202, over 9410.20 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,461 INFO [trainer.py:765] (6/8) Epoch 8, batch 400, train_loss[loss=2.872, ArTop10Accuracy=0.7443, over 11005.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7206, over 10324.30 frames. ], batch size: 15, lr: 1.45e-02 +2024-08-06 04:45:00,691 INFO [trainer.py:765] (6/8) Epoch 8, batch 500, train_loss[loss=2.913, ArTop10Accuracy=0.7315, over 12314.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7197, over 10888.82 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,392 INFO [trainer.py:765] (6/8) Epoch 8, batch 600, train_loss[loss=2.862, ArTop10Accuracy=0.73, over 11694.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7187, over 11422.69 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,037 INFO [trainer.py:765] (6/8) Epoch 8, batch 700, train_loss[loss=2.828, ArTop10Accuracy=0.7413, over 10124.00 frames. ], tot_loss[loss=2.973, ArTop10Accuracy=0.7172, over 11567.71 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,206 INFO [trainer.py:765] (6/8) Epoch 8, batch 800, train_loss[loss=3, ArTop10Accuracy=0.7161, over 10100.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.717, over 11672.37 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,605 INFO [trainer.py:765] (6/8) Epoch 8, batch 900, train_loss[loss=2.922, ArTop10Accuracy=0.731, over 12991.00 frames. ], tot_loss[loss=2.964, ArTop10Accuracy=0.7192, over 11714.53 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,031 INFO [trainer.py:765] (6/8) Epoch 8, batch 1000, train_loss[loss=3.023, ArTop10Accuracy=0.7092, over 12925.00 frames. ], tot_loss[loss=2.968, ArTop10Accuracy=0.7181, over 11908.81 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:28,826 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (6/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,931 INFO [trainer.py:765] (6/8) Epoch 8, batch 1100, train_loss[loss=3.009, ArTop10Accuracy=0.7063, over 13836.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7168, over 11965.86 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,201 INFO [trainer.py:765] (6/8) Epoch 8, batch 1200, train_loss[loss=3.172, ArTop10Accuracy=0.6788, over 12546.00 frames. ], tot_loss[loss=2.976, ArTop10Accuracy=0.7165, over 11922.21 frames. ], batch size: 98, lr: 1.40e-02 +2024-08-06 04:49:48,386 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:51:01,548 INFO [trainer.py:765] (6/8) Epoch 9, batch 100, train_loss[loss=2.999, ArTop10Accuracy=0.7161, over 14559.00 frames. ], tot_loss[loss=2.955, ArTop10Accuracy=0.7214, over 4782.48 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (6/8) Epoch 9, batch 200, train_loss[loss=2.95, ArTop10Accuracy=0.7205, over 13862.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7252, over 7782.04 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (6/8) Epoch 9, batch 300, train_loss[loss=2.96, ArTop10Accuracy=0.7273, over 14179.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7256, over 9427.27 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (6/8) Epoch 9, batch 400, train_loss[loss=3.048, ArTop10Accuracy=0.7115, over 10180.00 frames. ], tot_loss[loss=2.941, ArTop10Accuracy=0.7241, over 10333.95 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (6/8) Epoch 9, batch 500, train_loss[loss=2.918, ArTop10Accuracy=0.7287, over 12246.00 frames. ], tot_loss[loss=2.935, ArTop10Accuracy=0.7247, over 10898.02 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (6/8) Epoch 9, batch 600, train_loss[loss=2.791, ArTop10Accuracy=0.7564, over 11719.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7237, over 11442.63 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (6/8) Epoch 9, batch 700, train_loss[loss=2.828, ArTop10Accuracy=0.7401, over 10363.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7225, over 11584.90 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,574 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,598 INFO [trainer.py:765] (6/8) Epoch 9, batch 800, train_loss[loss=2.846, ArTop10Accuracy=0.7383, over 10147.00 frames. ], tot_loss[loss=2.954, ArTop10Accuracy=0.7212, over 11682.73 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,976 INFO [trainer.py:765] (6/8) Epoch 9, batch 900, train_loss[loss=3.01, ArTop10Accuracy=0.7141, over 12949.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7226, over 11748.16 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (6/8) Epoch 9, batch 1000, train_loss[loss=2.774, ArTop10Accuracy=0.7466, over 12978.00 frames. ], tot_loss[loss=2.946, ArTop10Accuracy=0.7221, over 11953.92 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,657 INFO [trainer.py:765] (6/8) Epoch 9, batch 1100, train_loss[loss=2.944, ArTop10Accuracy=0.7184, over 13608.00 frames. ], tot_loss[loss=2.957, ArTop10Accuracy=0.7202, over 11997.00 frames. ], batch size: 34, lr: 1.27e-02 +2024-08-06 04:58:18,094 INFO [trainer.py:765] (6/8) Epoch 9, batch 1200, train_loss[loss=3.158, ArTop10Accuracy=0.6793, over 11926.00 frames. ], tot_loss[loss=2.956, ArTop10Accuracy=0.7201, over 11942.22 frames. ], batch size: 100, lr: 1.27e-02 +2024-08-06 04:58:43,862 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 04:59:52,749 INFO [trainer.py:765] (6/8) Epoch 10, batch 100, train_loss[loss=3.026, ArTop10Accuracy=0.706, over 14447.00 frames. ], tot_loss[loss=2.917, ArTop10Accuracy=0.729, over 4805.52 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (6/8) Epoch 10, batch 200, train_loss[loss=2.916, ArTop10Accuracy=0.7329, over 13714.00 frames. ], tot_loss[loss=2.917, ArTop10Accuracy=0.7291, over 7803.72 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,591 INFO [trainer.py:765] (6/8) Epoch 10, batch 300, train_loss[loss=2.938, ArTop10Accuracy=0.7244, over 14233.00 frames. ], tot_loss[loss=2.914, ArTop10Accuracy=0.7294, over 9413.02 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (6/8) Epoch 10, batch 400, train_loss[loss=3.002, ArTop10Accuracy=0.7058, over 11150.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7272, over 10330.50 frames. ], batch size: 15, lr: 1.19e-02 +2024-08-06 05:02:46,488 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 05:02:55,378 INFO [trainer.py:811] (6/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (6/8) Epoch 10, batch 500, train_loss[loss=2.882, ArTop10Accuracy=0.7305, over 12094.00 frames. ], tot_loss[loss=2.916, ArTop10Accuracy=0.7283, over 10901.91 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (6/8) Epoch 10, batch 600, train_loss[loss=2.951, ArTop10Accuracy=0.722, over 11503.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7273, over 11411.77 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,715 INFO [trainer.py:765] (6/8) Epoch 10, batch 700, train_loss[loss=2.805, ArTop10Accuracy=0.7427, over 10260.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7258, over 11582.96 frames. ], batch size: 12, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (6/8) Epoch 10, batch 800, train_loss[loss=2.809, ArTop10Accuracy=0.742, over 9179.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7252, over 11694.54 frames. ], batch size: 11, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (6/8) Epoch 10, batch 900, train_loss[loss=2.903, ArTop10Accuracy=0.7268, over 13022.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7263, over 11735.62 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,844 INFO [trainer.py:765] (6/8) Epoch 10, batch 1000, train_loss[loss=2.911, ArTop10Accuracy=0.734, over 13074.00 frames. ], tot_loss[loss=2.93, ArTop10Accuracy=0.7261, over 11932.28 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (6/8) Epoch 10, batch 1100, train_loss[loss=2.96, ArTop10Accuracy=0.7241, over 13853.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7241, over 11994.78 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,483 INFO [trainer.py:765] (6/8) Epoch 10, batch 1200, train_loss[loss=3.077, ArTop10Accuracy=0.6975, over 12334.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7241, over 11941.41 frames. ], batch size: 98, lr: 1.16e-02 +2024-08-06 05:07:40,359 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:08:52,966 INFO [trainer.py:765] (6/8) Epoch 11, batch 100, train_loss[loss=2.96, ArTop10Accuracy=0.7138, over 14417.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7328, over 4797.63 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,277 INFO [trainer.py:765] (6/8) Epoch 11, batch 200, train_loss[loss=2.906, ArTop10Accuracy=0.7315, over 13696.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.733, over 7787.67 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,720 INFO [trainer.py:765] (6/8) Epoch 11, batch 300, train_loss[loss=2.943, ArTop10Accuracy=0.721, over 14337.00 frames. ], tot_loss[loss=2.899, ArTop10Accuracy=0.7327, over 9421.71 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,783 INFO [trainer.py:765] (6/8) Epoch 11, batch 400, train_loss[loss=2.64, ArTop10Accuracy=0.7663, over 10380.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.733, over 10359.43 frames. ], batch size: 14, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (6/8) Epoch 11, batch 500, train_loss[loss=2.843, ArTop10Accuracy=0.7353, over 12382.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7334, over 10929.24 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,287 INFO [trainer.py:765] (6/8) Epoch 11, batch 600, train_loss[loss=2.738, ArTop10Accuracy=0.759, over 11689.00 frames. ], tot_loss[loss=2.901, ArTop10Accuracy=0.7316, over 11440.25 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,708 INFO [trainer.py:765] (6/8) Epoch 11, batch 700, train_loss[loss=2.641, ArTop10Accuracy=0.7817, over 10194.00 frames. ], tot_loss[loss=2.91, ArTop10Accuracy=0.7296, over 11573.71 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (6/8) Epoch 11, batch 800, train_loss[loss=2.725, ArTop10Accuracy=0.7633, over 10132.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7293, over 11685.32 frames. ], batch size: 12, lr: 1.07e-02 +2024-08-06 05:14:35,667 INFO [trainer.py:765] (6/8) Epoch 11, batch 900, train_loss[loss=2.873, ArTop10Accuracy=0.7368, over 12868.00 frames. ], tot_loss[loss=2.91, ArTop10Accuracy=0.7297, over 11740.18 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,263 INFO [trainer.py:765] (6/8) Epoch 11, batch 1000, train_loss[loss=2.893, ArTop10Accuracy=0.7318, over 12972.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7275, over 11939.75 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,259 INFO [trainer.py:765] (6/8) Epoch 11, batch 1100, train_loss[loss=2.943, ArTop10Accuracy=0.7183, over 13533.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7268, over 11976.86 frames. ], batch size: 34, lr: 1.06e-02 +2024-08-06 05:16:08,497 INFO [trainer.py:765] (6/8) Epoch 11, batch 1200, train_loss[loss=3.038, ArTop10Accuracy=0.7033, over 12384.00 frames. ], tot_loss[loss=2.922, ArTop10Accuracy=0.7273, over 11910.45 frames. ], batch size: 97, lr: 1.06e-02 +2024-08-06 05:16:12,697 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 05:16:21,623 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,621 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:18:03,005 INFO [trainer.py:765] (6/8) Epoch 12, batch 100, train_loss[loss=2.922, ArTop10Accuracy=0.7236, over 14539.00 frames. ], tot_loss[loss=2.905, ArTop10Accuracy=0.7324, over 4770.25 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,005 INFO [trainer.py:765] (6/8) Epoch 12, batch 200, train_loss[loss=2.796, ArTop10Accuracy=0.7531, over 13926.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7354, over 7807.67 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (6/8) Epoch 12, batch 300, train_loss[loss=2.825, ArTop10Accuracy=0.7489, over 14196.00 frames. ], tot_loss[loss=2.89, ArTop10Accuracy=0.7351, over 9430.87 frames. ], batch size: 44, lr: 1.01e-02 +2024-08-06 05:20:12,431 INFO [trainer.py:765] (6/8) Epoch 12, batch 400, train_loss[loss=2.808, ArTop10Accuracy=0.7492, over 10331.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7357, over 10340.51 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (6/8) Epoch 12, batch 500, train_loss[loss=2.843, ArTop10Accuracy=0.7418, over 12318.00 frames. ], tot_loss[loss=2.885, ArTop10Accuracy=0.7355, over 10904.95 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,916 INFO [trainer.py:765] (6/8) Epoch 12, batch 600, train_loss[loss=2.847, ArTop10Accuracy=0.7485, over 11604.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7336, over 11426.17 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,206 INFO [trainer.py:765] (6/8) Epoch 12, batch 700, train_loss[loss=2.882, ArTop10Accuracy=0.7386, over 10149.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7316, over 11589.05 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,912 INFO [trainer.py:765] (6/8) Epoch 12, batch 800, train_loss[loss=2.967, ArTop10Accuracy=0.7172, over 10262.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7311, over 11709.56 frames. ], batch size: 12, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (6/8) Epoch 12, batch 900, train_loss[loss=2.83, ArTop10Accuracy=0.7505, over 12871.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7333, over 11744.06 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 05:23:54,576 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,346 INFO [trainer.py:765] (6/8) Epoch 12, batch 1000, train_loss[loss=2.954, ArTop10Accuracy=0.724, over 13001.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7311, over 11932.47 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,502 INFO [trainer.py:765] (6/8) Epoch 12, batch 1100, train_loss[loss=2.908, ArTop10Accuracy=0.728, over 13990.00 frames. ], tot_loss[loss=2.911, ArTop10Accuracy=0.7299, over 11971.08 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (6/8) Epoch 12, batch 1200, train_loss[loss=3.038, ArTop10Accuracy=0.7017, over 11930.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7297, over 11919.19 frames. ], batch size: 98, lr: 9.78e-03 +2024-08-06 05:25:41,445 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:26:46,785 INFO [trainer.py:765] (6/8) Epoch 13, batch 100, train_loss[loss=2.927, ArTop10Accuracy=0.7285, over 15169.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.7371, over 4794.44 frames. ], batch size: 62, lr: 9.36e-03 +2024-08-06 05:27:32,551 INFO [trainer.py:765] (6/8) Epoch 13, batch 200, train_loss[loss=2.792, ArTop10Accuracy=0.75, over 13763.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7384, over 7792.14 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,035 INFO [trainer.py:765] (6/8) Epoch 13, batch 300, train_loss[loss=2.91, ArTop10Accuracy=0.736, over 14214.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7383, over 9415.73 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,148 INFO [trainer.py:765] (6/8) Epoch 13, batch 400, train_loss[loss=2.859, ArTop10Accuracy=0.7425, over 11014.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7376, over 10334.58 frames. ], batch size: 15, lr: 9.28e-03 +2024-08-06 05:29:43,966 INFO [trainer.py:765] (6/8) Epoch 13, batch 500, train_loss[loss=2.878, ArTop10Accuracy=0.7365, over 12313.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7383, over 10898.20 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,246 INFO [trainer.py:765] (6/8) Epoch 13, batch 600, train_loss[loss=2.776, ArTop10Accuracy=0.7517, over 11532.00 frames. ], tot_loss[loss=2.879, ArTop10Accuracy=0.7361, over 11441.59 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,108 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 05:31:07,053 INFO [trainer.py:811] (6/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,042 INFO [trainer.py:765] (6/8) Epoch 13, batch 700, train_loss[loss=2.868, ArTop10Accuracy=0.7485, over 9967.00 frames. ], tot_loss[loss=2.888, ArTop10Accuracy=0.7344, over 11571.49 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,146 INFO [trainer.py:765] (6/8) Epoch 13, batch 800, train_loss[loss=2.855, ArTop10Accuracy=0.744, over 9478.00 frames. ], tot_loss[loss=2.894, ArTop10Accuracy=0.7333, over 11678.58 frames. ], batch size: 11, lr: 9.18e-03 +2024-08-06 05:32:31,520 INFO [trainer.py:765] (6/8) Epoch 13, batch 900, train_loss[loss=2.874, ArTop10Accuracy=0.7391, over 12900.00 frames. ], tot_loss[loss=2.883, ArTop10Accuracy=0.7351, over 11725.62 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,042 INFO [trainer.py:765] (6/8) Epoch 13, batch 1000, train_loss[loss=2.849, ArTop10Accuracy=0.7394, over 12888.00 frames. ], tot_loss[loss=2.889, ArTop10Accuracy=0.7343, over 11931.40 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,231 INFO [trainer.py:765] (6/8) Epoch 13, batch 1100, train_loss[loss=2.872, ArTop10Accuracy=0.7424, over 13923.00 frames. ], tot_loss[loss=2.896, ArTop10Accuracy=0.7327, over 11998.91 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,518 INFO [trainer.py:765] (6/8) Epoch 13, batch 1200, train_loss[loss=3.036, ArTop10Accuracy=0.7032, over 11967.00 frames. ], tot_loss[loss=2.897, ArTop10Accuracy=0.7325, over 11962.56 frames. ], batch size: 98, lr: 9.07e-03 +2024-08-06 05:34:29,208 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:35:39,196 INFO [trainer.py:765] (6/8) Epoch 14, batch 100, train_loss[loss=2.934, ArTop10Accuracy=0.7265, over 14590.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.738, over 4777.92 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,062 INFO [trainer.py:765] (6/8) Epoch 14, batch 200, train_loss[loss=2.883, ArTop10Accuracy=0.7339, over 13846.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7393, over 7784.24 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,308 INFO [trainer.py:765] (6/8) Epoch 14, batch 300, train_loss[loss=2.876, ArTop10Accuracy=0.7371, over 14325.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7404, over 9417.50 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,029 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,138 INFO [trainer.py:765] (6/8) Epoch 14, batch 400, train_loss[loss=2.882, ArTop10Accuracy=0.7348, over 11107.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.741, over 10327.78 frames. ], batch size: 15, lr: 8.64e-03 +2024-08-06 05:38:42,024 INFO [trainer.py:765] (6/8) Epoch 14, batch 500, train_loss[loss=2.801, ArTop10Accuracy=0.7449, over 12382.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7409, over 10911.66 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,373 INFO [trainer.py:765] (6/8) Epoch 14, batch 600, train_loss[loss=3.01, ArTop10Accuracy=0.7053, over 11436.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7392, over 11428.98 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,142 INFO [trainer.py:765] (6/8) Epoch 14, batch 700, train_loss[loss=2.688, ArTop10Accuracy=0.7687, over 9956.00 frames. ], tot_loss[loss=2.871, ArTop10Accuracy=0.7376, over 11559.58 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,134 INFO [trainer.py:765] (6/8) Epoch 14, batch 800, train_loss[loss=2.785, ArTop10Accuracy=0.7492, over 10000.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7382, over 11668.08 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,465 INFO [trainer.py:765] (6/8) Epoch 14, batch 900, train_loss[loss=2.895, ArTop10Accuracy=0.7343, over 12757.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.739, over 11719.99 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,994 INFO [trainer.py:765] (6/8) Epoch 14, batch 1000, train_loss[loss=2.835, ArTop10Accuracy=0.7452, over 12949.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7381, over 11932.48 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,215 INFO [trainer.py:765] (6/8) Epoch 14, batch 1100, train_loss[loss=2.878, ArTop10Accuracy=0.7432, over 13891.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7365, over 11977.46 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,548 INFO [trainer.py:765] (6/8) Epoch 14, batch 1200, train_loss[loss=3.095, ArTop10Accuracy=0.6949, over 12816.00 frames. ], tot_loss[loss=2.876, ArTop10Accuracy=0.7363, over 11916.67 frames. ], batch size: 98, lr: 8.46e-03 +2024-08-06 05:43:18,123 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:44:28,572 INFO [trainer.py:765] (6/8) Epoch 15, batch 100, train_loss[loss=2.963, ArTop10Accuracy=0.7203, over 14411.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7422, over 4802.26 frames. ], batch size: 61, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 05:44:38,023 INFO [trainer.py:811] (6/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,184 INFO [trainer.py:765] (6/8) Epoch 15, batch 200, train_loss[loss=2.844, ArTop10Accuracy=0.7528, over 13453.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7443, over 7797.16 frames. ], batch size: 33, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (6/8) Epoch 15, batch 300, train_loss[loss=2.874, ArTop10Accuracy=0.7451, over 14226.00 frames. ], tot_loss[loss=2.842, ArTop10Accuracy=0.7444, over 9425.70 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (6/8) Epoch 15, batch 400, train_loss[loss=2.835, ArTop10Accuracy=0.7471, over 10398.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7431, over 10338.77 frames. ], batch size: 14, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (6/8) Epoch 15, batch 500, train_loss[loss=2.798, ArTop10Accuracy=0.7539, over 12040.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7425, over 10901.73 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (6/8) Epoch 15, batch 600, train_loss[loss=3.032, ArTop10Accuracy=0.708, over 11585.00 frames. ], tot_loss[loss=2.852, ArTop10Accuracy=0.7415, over 11431.99 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,854 INFO [trainer.py:765] (6/8) Epoch 15, batch 700, train_loss[loss=2.907, ArTop10Accuracy=0.7235, over 9913.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7409, over 11557.07 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,778 INFO [trainer.py:765] (6/8) Epoch 15, batch 800, train_loss[loss=2.845, ArTop10Accuracy=0.7403, over 9945.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7399, over 11685.60 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (6/8) Epoch 15, batch 900, train_loss[loss=2.869, ArTop10Accuracy=0.7355, over 12966.00 frames. ], tot_loss[loss=2.855, ArTop10Accuracy=0.741, over 11732.22 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (6/8) Epoch 15, batch 1000, train_loss[loss=2.83, ArTop10Accuracy=0.7478, over 12923.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7404, over 11937.36 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,068 INFO [trainer.py:765] (6/8) Epoch 15, batch 1100, train_loss[loss=2.843, ArTop10Accuracy=0.7462, over 13762.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7391, over 11970.94 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,081 INFO [trainer.py:765] (6/8) Epoch 15, batch 1200, train_loss[loss=2.994, ArTop10Accuracy=0.7184, over 11526.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.738, over 11928.70 frames. ], batch size: 97, lr: 7.91e-03 +2024-08-06 05:52:18,023 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (6/8) Epoch 16, batch 100, train_loss[loss=2.992, ArTop10Accuracy=0.7189, over 14688.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7458, over 4783.81 frames. 
], batch size: 62, lr: 7.63e-03 +2024-08-06 05:54:12,878 INFO [trainer.py:765] (6/8) Epoch 16, batch 200, train_loss[loss=2.93, ArTop10Accuracy=0.7236, over 13434.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7459, over 7790.87 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (6/8) Epoch 16, batch 300, train_loss[loss=2.845, ArTop10Accuracy=0.7404, over 13948.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7463, over 9424.41 frames. ], batch size: 43, lr: 7.59e-03 +2024-08-06 05:55:41,931 INFO [trainer.py:765] (6/8) Epoch 16, batch 400, train_loss[loss=2.767, ArTop10Accuracy=0.7507, over 11000.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7464, over 10343.48 frames. ], batch size: 15, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (6/8) Epoch 16, batch 500, train_loss[loss=2.861, ArTop10Accuracy=0.7365, over 12609.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7454, over 10912.79 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,440 INFO [trainer.py:765] (6/8) Epoch 16, batch 600, train_loss[loss=2.911, ArTop10Accuracy=0.7347, over 11418.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7442, over 11438.91 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (6/8) Epoch 16, batch 700, train_loss[loss=2.715, ArTop10Accuracy=0.7573, over 10134.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7434, over 11591.00 frames. ], batch size: 12, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (6/8) Epoch 16, batch 800, train_loss[loss=2.779, ArTop10Accuracy=0.7574, over 9957.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7427, over 11698.77 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,569 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (6/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (6/8) Epoch 16, batch 900, train_loss[loss=2.816, ArTop10Accuracy=0.7501, over 13027.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7436, over 11748.87 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (6/8) Epoch 16, batch 1000, train_loss[loss=2.783, ArTop10Accuracy=0.7581, over 13037.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7428, over 11941.45 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,092 INFO [trainer.py:765] (6/8) Epoch 16, batch 1100, train_loss[loss=2.886, ArTop10Accuracy=0.7374, over 13750.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7413, over 12013.33 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,465 INFO [trainer.py:765] (6/8) Epoch 16, batch 1200, train_loss[loss=2.953, ArTop10Accuracy=0.7222, over 12108.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7407, over 11956.69 frames. ], batch size: 97, lr: 7.43e-03 +2024-08-06 06:01:12,987 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:02:27,259 INFO [trainer.py:765] (6/8) Epoch 17, batch 100, train_loss[loss=2.862, ArTop10Accuracy=0.7439, over 14489.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7462, over 4790.39 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,849 INFO [trainer.py:765] (6/8) Epoch 17, batch 200, train_loss[loss=2.799, ArTop10Accuracy=0.7474, over 13544.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.747, over 7780.17 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,501 INFO [trainer.py:765] (6/8) Epoch 17, batch 300, train_loss[loss=2.91, ArTop10Accuracy=0.7298, over 14177.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7486, over 9405.44 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,836 INFO [trainer.py:765] (6/8) Epoch 17, batch 400, train_loss[loss=2.666, ArTop10Accuracy=0.7727, over 10817.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7485, over 10324.69 frames. ], batch size: 15, lr: 7.13e-03 +2024-08-06 06:05:29,002 INFO [trainer.py:765] (6/8) Epoch 17, batch 500, train_loss[loss=2.923, ArTop10Accuracy=0.7174, over 12180.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7484, over 10910.58 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,550 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,721 INFO [trainer.py:765] (6/8) Epoch 17, batch 600, train_loss[loss=2.674, ArTop10Accuracy=0.7685, over 11394.00 frames. ], tot_loss[loss=2.822, ArTop10Accuracy=0.7474, over 11438.45 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,693 INFO [trainer.py:765] (6/8) Epoch 17, batch 700, train_loss[loss=2.582, ArTop10Accuracy=0.7838, over 10038.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7467, over 11556.02 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,894 INFO [trainer.py:765] (6/8) Epoch 17, batch 800, train_loss[loss=2.873, ArTop10Accuracy=0.7407, over 10315.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7446, over 11683.34 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,382 INFO [trainer.py:765] (6/8) Epoch 17, batch 900, train_loss[loss=2.861, ArTop10Accuracy=0.7367, over 12845.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.7446, over 11724.20 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,993 INFO [trainer.py:765] (6/8) Epoch 17, batch 1000, train_loss[loss=2.76, ArTop10Accuracy=0.7643, over 12848.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7449, over 11940.36 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,132 INFO [trainer.py:765] (6/8) Epoch 17, batch 1100, train_loss[loss=2.887, ArTop10Accuracy=0.7403, over 13555.00 frames. ], tot_loss[loss=2.844, ArTop10Accuracy=0.7435, over 11992.06 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,443 INFO [trainer.py:765] (6/8) Epoch 17, batch 1200, train_loss[loss=2.942, ArTop10Accuracy=0.7254, over 12182.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7428, over 11940.64 frames. ], batch size: 97, lr: 7.01e-03 +2024-08-06 06:10:14,436 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:11:23,102 INFO [trainer.py:765] (6/8) Epoch 18, batch 100, train_loss[loss=2.89, ArTop10Accuracy=0.7325, over 14378.00 frames. ], tot_loss[loss=2.807, ArTop10Accuracy=0.751, over 4781.49 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (6/8) Epoch 18, batch 200, train_loss[loss=2.775, ArTop10Accuracy=0.7507, over 13743.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7504, over 7783.51 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,318 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (6/8) Epoch 18, batch 300, train_loss[loss=2.856, ArTop10Accuracy=0.7378, over 14420.00 frames. ], tot_loss[loss=2.811, ArTop10Accuracy=0.7506, over 9412.99 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,097 INFO [trainer.py:765] (6/8) Epoch 18, batch 400, train_loss[loss=2.679, ArTop10Accuracy=0.7648, over 10949.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7503, over 10334.13 frames. ], batch size: 15, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (6/8) Epoch 18, batch 500, train_loss[loss=2.799, ArTop10Accuracy=0.7553, over 12199.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7496, over 10885.29 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (6/8) Epoch 18, batch 600, train_loss[loss=2.822, ArTop10Accuracy=0.7471, over 11570.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.748, over 11414.14 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,344 INFO [trainer.py:765] (6/8) Epoch 18, batch 700, train_loss[loss=2.725, ArTop10Accuracy=0.7673, over 10095.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7483, over 11559.49 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,429 INFO [trainer.py:765] (6/8) Epoch 18, batch 800, train_loss[loss=2.72, ArTop10Accuracy=0.7681, over 10198.00 frames. ], tot_loss[loss=2.826, ArTop10Accuracy=0.7471, over 11696.50 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,913 INFO [trainer.py:765] (6/8) Epoch 18, batch 900, train_loss[loss=2.877, ArTop10Accuracy=0.7344, over 12935.00 frames. ], tot_loss[loss=2.823, ArTop10Accuracy=0.7478, over 11745.85 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,529 INFO [trainer.py:765] (6/8) Epoch 18, batch 1000, train_loss[loss=2.798, ArTop10Accuracy=0.7513, over 12920.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7467, over 11940.27 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,662 INFO [trainer.py:765] (6/8) Epoch 18, batch 1100, train_loss[loss=2.832, ArTop10Accuracy=0.7412, over 13452.00 frames. ], tot_loss[loss=2.834, ArTop10Accuracy=0.7455, over 11976.98 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (6/8) Epoch 18, batch 1200, train_loss[loss=3.031, ArTop10Accuracy=0.705, over 12411.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7453, over 11904.59 frames. ], batch size: 99, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,775 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:20:29,727 INFO [trainer.py:765] (6/8) Epoch 19, batch 100, train_loss[loss=2.916, ArTop10Accuracy=0.7353, over 14458.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7511, over 4767.52 frames. 
], batch size: 62, lr: 6.43e-03 +2024-08-06 06:21:11,274 INFO [trainer.py:765] (6/8) Epoch 19, batch 200, train_loss[loss=2.83, ArTop10Accuracy=0.7477, over 13633.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7516, over 7771.17 frames. ], batch size: 34, lr: 6.41e-03 +2024-08-06 06:21:56,077 INFO [trainer.py:765] (6/8) Epoch 19, batch 300, train_loss[loss=2.895, ArTop10Accuracy=0.7362, over 14409.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.753, over 9417.16 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,012 INFO [trainer.py:765] (6/8) Epoch 19, batch 400, train_loss[loss=2.826, ArTop10Accuracy=0.7494, over 10462.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7523, over 10308.87 frames. ], batch size: 14, lr: 6.39e-03 +2024-08-06 06:23:18,997 INFO [trainer.py:765] (6/8) Epoch 19, batch 500, train_loss[loss=2.869, ArTop10Accuracy=0.7441, over 12301.00 frames. ], tot_loss[loss=2.799, ArTop10Accuracy=0.7523, over 10883.03 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (6/8) Epoch 19, batch 600, train_loss[loss=2.721, ArTop10Accuracy=0.7642, over 11679.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7509, over 11399.07 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,185 INFO [trainer.py:765] (6/8) Epoch 19, batch 700, train_loss[loss=2.745, ArTop10Accuracy=0.7617, over 10301.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7497, over 11560.55 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,354 INFO [trainer.py:765] (6/8) Epoch 19, batch 800, train_loss[loss=2.709, ArTop10Accuracy=0.7651, over 10148.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7486, over 11678.80 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,624 INFO [trainer.py:765] (6/8) Epoch 19, batch 900, train_loss[loss=2.786, ArTop10Accuracy=0.7545, over 12945.00 frames. ], tot_loss[loss=2.808, ArTop10Accuracy=0.75, over 11735.33 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,772 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (6/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 33346MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,031 INFO [trainer.py:765] (6/8) Epoch 19, batch 1000, train_loss[loss=2.785, ArTop10Accuracy=0.7568, over 13121.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7485, over 11951.16 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (6/8) Epoch 19, batch 1100, train_loss[loss=2.72, ArTop10Accuracy=0.7517, over 13850.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7464, over 12005.23 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,454 INFO [trainer.py:765] (6/8) Epoch 19, batch 1200, train_loss[loss=2.991, ArTop10Accuracy=0.7117, over 11936.00 frames. ], tot_loss[loss=2.83, ArTop10Accuracy=0.7459, over 11936.69 frames. ], batch size: 99, lr: 6.28e-03 +2024-08-06 06:28:00,603 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:29:08,984 INFO [trainer.py:765] (6/8) Epoch 20, batch 100, train_loss[loss=2.873, ArTop10Accuracy=0.7427, over 14343.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7541, over 4789.62 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,317 INFO [trainer.py:765] (6/8) Epoch 20, batch 200, train_loss[loss=2.82, ArTop10Accuracy=0.753, over 13711.00 frames. ], tot_loss[loss=2.79, ArTop10Accuracy=0.7542, over 7783.49 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,105 INFO [trainer.py:765] (6/8) Epoch 20, batch 300, train_loss[loss=2.798, ArTop10Accuracy=0.7507, over 14368.00 frames. ], tot_loss[loss=2.794, ArTop10Accuracy=0.7538, over 9403.81 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,354 INFO [trainer.py:765] (6/8) Epoch 20, batch 400, train_loss[loss=2.585, ArTop10Accuracy=0.7829, over 10999.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7542, over 10327.37 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,758 INFO [trainer.py:765] (6/8) Epoch 20, batch 500, train_loss[loss=2.894, ArTop10Accuracy=0.738, over 12377.00 frames. ], tot_loss[loss=2.791, ArTop10Accuracy=0.7542, over 10900.01 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,356 INFO [trainer.py:765] (6/8) Epoch 20, batch 600, train_loss[loss=2.625, ArTop10Accuracy=0.7819, over 11630.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7529, over 11441.89 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,752 INFO [trainer.py:765] (6/8) Epoch 20, batch 700, train_loss[loss=2.81, ArTop10Accuracy=0.755, over 10185.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7514, over 11590.91 frames. ], batch size: 12, lr: 6.03e-03 +2024-08-06 06:33:43,829 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,303 INFO [trainer.py:765] (6/8) Epoch 20, batch 800, train_loss[loss=2.773, ArTop10Accuracy=0.7652, over 10056.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7502, over 11697.57 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,579 INFO [trainer.py:765] (6/8) Epoch 20, batch 900, train_loss[loss=2.739, ArTop10Accuracy=0.7591, over 13101.00 frames. ], tot_loss[loss=2.804, ArTop10Accuracy=0.7511, over 11740.56 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,138 INFO [trainer.py:765] (6/8) Epoch 20, batch 1000, train_loss[loss=2.824, ArTop10Accuracy=0.7492, over 13030.00 frames. ], tot_loss[loss=2.814, ArTop10Accuracy=0.7496, over 11931.56 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (6/8) Epoch 20, batch 1100, train_loss[loss=2.844, ArTop10Accuracy=0.7486, over 13725.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7478, over 11997.26 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,438 INFO [trainer.py:765] (6/8) Epoch 20, batch 1200, train_loss[loss=2.983, ArTop10Accuracy=0.7186, over 11903.00 frames. ], tot_loss[loss=2.82, ArTop10Accuracy=0.7479, over 11933.95 frames. ], batch size: 98, lr: 5.97e-03 +2024-08-06 06:36:42,591 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:36:42,593 INFO [trainer.py:1069] (6/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-03-39-40-7 b/libritts/log/log-train-2024-08-06-03-39-40-7 new file mode 100644 index 0000000000000000000000000000000000000000..7e19c7b4fef0503014d09749346b601ea6b39dff --- /dev/null +++ b/libritts/log/log-train-2024-08-06-03-39-40-7 @@ -0,0 +1,336 @@ +2024-08-06 03:39:40,339 INFO [trainer.py:870] (7/8) Training started +2024-08-06 03:39:40,340 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 03:39:40,340 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '7d2e5f4-dirty', 'icefall-git-date': 'Tue Aug 6 02:59:12 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 20, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 1, 'dtype': 'bfloat16', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 1, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 320, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 03:39:40,341 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 03:39:41,122 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 03:39:41,926 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 03:39:43,998 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 03:39:44,000 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 03:39:44,001 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 03:39:44,002 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 03:39:44,002 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 03:39:44,610 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 03:39:44,611 INFO [datamodule.py:367] (7/8) About to create dev dataset +2024-08-06 03:39:44,941 INFO [datamodule.py:388] (7/8) About to create dev 
dataloader +2024-08-06 03:40:39,571 INFO [trainer.py:765] (7/8) Epoch 1, batch 100, train_loss[loss=4.129, ArTop10Accuracy=0.5083, over 14809.00 frames. ], tot_loss[loss=4.784, ArTop10Accuracy=0.3964, over 4792.93 frames. ], batch size: 61, lr: 2.25e-02 +2024-08-06 03:41:16,923 INFO [trainer.py:765] (7/8) Epoch 1, batch 200, train_loss[loss=3.934, ArTop10Accuracy=0.5439, over 13819.00 frames. ], tot_loss[loss=4.308, ArTop10Accuracy=0.4752, over 7794.05 frames. ], batch size: 34, lr: 3.00e-02 +2024-08-06 03:41:57,951 INFO [trainer.py:765] (7/8) Epoch 1, batch 300, train_loss[loss=3.846, ArTop10Accuracy=0.5427, over 14222.00 frames. ], tot_loss[loss=4.09, ArTop10Accuracy=0.5104, over 9413.19 frames. ], batch size: 44, lr: 3.00e-02 +2024-08-06 03:42:33,081 INFO [trainer.py:765] (7/8) Epoch 1, batch 400, train_loss[loss=3.626, ArTop10Accuracy=0.5841, over 11074.00 frames. ], tot_loss[loss=3.945, ArTop10Accuracy=0.5338, over 10341.79 frames. ], batch size: 15, lr: 3.00e-02 +2024-08-06 03:43:11,270 INFO [trainer.py:765] (7/8) Epoch 1, batch 500, train_loss[loss=3.737, ArTop10Accuracy=0.5661, over 12277.00 frames. ], tot_loss[loss=3.829, ArTop10Accuracy=0.5533, over 10907.39 frames. ], batch size: 22, lr: 2.99e-02 +2024-08-06 03:43:46,592 INFO [trainer.py:765] (7/8) Epoch 1, batch 600, train_loss[loss=3.518, ArTop10Accuracy=0.6106, over 11558.00 frames. ], tot_loss[loss=3.75, ArTop10Accuracy=0.5673, over 11441.86 frames. ], batch size: 18, lr: 2.99e-02 +2024-08-06 03:44:27,900 INFO [trainer.py:765] (7/8) Epoch 1, batch 700, train_loss[loss=3.401, ArTop10Accuracy=0.6251, over 10127.00 frames. ], tot_loss[loss=3.686, ArTop10Accuracy=0.5785, over 11584.80 frames. ], batch size: 12, lr: 2.99e-02 +2024-08-06 03:45:01,513 INFO [trainer.py:765] (7/8) Epoch 1, batch 800, train_loss[loss=3.423, ArTop10Accuracy=0.625, over 10089.00 frames. ], tot_loss[loss=3.634, ArTop10Accuracy=0.5881, over 11711.73 frames. ], batch size: 12, lr: 2.98e-02 +2024-08-06 03:45:32,557 INFO [trainer.py:765] (7/8) Epoch 1, batch 900, train_loss[loss=3.579, ArTop10Accuracy=0.5984, over 12938.00 frames. ], tot_loss[loss=3.586, ArTop10Accuracy=0.5969, over 11738.21 frames. ], batch size: 27, lr: 2.98e-02 +2024-08-06 03:46:03,648 INFO [trainer.py:765] (7/8) Epoch 1, batch 1000, train_loss[loss=3.551, ArTop10Accuracy=0.6069, over 12928.00 frames. ], tot_loss[loss=3.554, ArTop10Accuracy=0.603, over 11938.35 frames. ], batch size: 27, lr: 2.97e-02 +2024-08-06 03:46:07,988 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 8.169e+01 1.565e+02 2.239e+02 3.485e+02 9.105e+03, threshold=4.478e+02, percent-clipped=0.0 +2024-08-06 03:46:38,611 INFO [trainer.py:765] (7/8) Epoch 1, batch 1100, train_loss[loss=3.42, ArTop10Accuracy=0.623, over 13609.00 frames. ], tot_loss[loss=3.527, ArTop10Accuracy=0.6078, over 12007.78 frames. ], batch size: 34, lr: 2.96e-02 +2024-08-06 03:47:08,744 INFO [trainer.py:765] (7/8) Epoch 1, batch 1200, train_loss[loss=3.49, ArTop10Accuracy=0.6182, over 12024.00 frames. ], tot_loss[loss=3.504, ArTop10Accuracy=0.6121, over 11959.08 frames. ], batch size: 97, lr: 2.96e-02 +2024-08-06 03:47:33,901 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 03:48:38,675 INFO [trainer.py:765] (7/8) Epoch 2, batch 100, train_loss[loss=3.47, ArTop10Accuracy=0.6212, over 14221.00 frames. ], tot_loss[loss=3.466, ArTop10Accuracy=0.6196, over 4767.00 frames. 
], batch size: 61, lr: 2.90e-02 +2024-08-06 03:49:14,596 INFO [trainer.py:765] (7/8) Epoch 2, batch 200, train_loss[loss=3.427, ArTop10Accuracy=0.6234, over 13745.00 frames. ], tot_loss[loss=3.439, ArTop10Accuracy=0.6243, over 7776.46 frames. ], batch size: 34, lr: 2.89e-02 +2024-08-06 03:49:56,519 INFO [trainer.py:765] (7/8) Epoch 2, batch 300, train_loss[loss=3.483, ArTop10Accuracy=0.6213, over 14230.00 frames. ], tot_loss[loss=3.416, ArTop10Accuracy=0.6294, over 9397.98 frames. ], batch size: 44, lr: 2.89e-02 +2024-08-06 03:50:31,999 INFO [trainer.py:765] (7/8) Epoch 2, batch 400, train_loss[loss=3.402, ArTop10Accuracy=0.6343, over 10467.00 frames. ], tot_loss[loss=3.41, ArTop10Accuracy=0.6307, over 10298.79 frames. ], batch size: 14, lr: 2.88e-02 +2024-08-06 03:51:17,109 INFO [trainer.py:765] (7/8) Epoch 2, batch 500, train_loss[loss=3.401, ArTop10Accuracy=0.6375, over 12143.00 frames. ], tot_loss[loss=3.404, ArTop10Accuracy=0.632, over 10872.04 frames. ], batch size: 22, lr: 2.87e-02 +2024-08-06 03:51:53,203 INFO [trainer.py:765] (7/8) Epoch 2, batch 600, train_loss[loss=3.297, ArTop10Accuracy=0.6514, over 11641.00 frames. ], tot_loss[loss=3.401, ArTop10Accuracy=0.6323, over 11425.09 frames. ], batch size: 18, lr: 2.86e-02 +2024-08-06 03:52:38,994 INFO [trainer.py:765] (7/8) Epoch 2, batch 700, train_loss[loss=3.344, ArTop10Accuracy=0.6342, over 9135.00 frames. ], tot_loss[loss=3.394, ArTop10Accuracy=0.6336, over 11561.78 frames. ], batch size: 11, lr: 2.85e-02 +2024-08-06 03:52:47,091 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 03:52:56,023 INFO [trainer.py:811] (7/8) Epoch 2, validation: loss=3.327, ArTop10Accuracy=0.6492, over 1829298.00 frames. +2024-08-06 03:52:56,024 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 28662MB +2024-08-06 03:52:56,542 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 8.181e+01 1.431e+02 1.849e+02 2.730e+02 2.344e+03, threshold=3.697e+02, percent-clipped=7.2 +2024-08-06 03:53:21,882 INFO [trainer.py:765] (7/8) Epoch 2, batch 800, train_loss[loss=3.318, ArTop10Accuracy=0.6498, over 9988.00 frames. ], tot_loss[loss=3.387, ArTop10Accuracy=0.635, over 11683.76 frames. ], batch size: 12, lr: 2.84e-02 +2024-08-06 03:53:53,300 INFO [trainer.py:765] (7/8) Epoch 2, batch 900, train_loss[loss=3.401, ArTop10Accuracy=0.63, over 12975.00 frames. ], tot_loss[loss=3.367, ArTop10Accuracy=0.6386, over 11740.71 frames. ], batch size: 27, lr: 2.83e-02 +2024-08-06 03:54:24,809 INFO [trainer.py:765] (7/8) Epoch 2, batch 1000, train_loss[loss=3.42, ArTop10Accuracy=0.635, over 12892.00 frames. ], tot_loss[loss=3.367, ArTop10Accuracy=0.6392, over 11931.42 frames. ], batch size: 27, lr: 2.82e-02 +2024-08-06 03:54:56,007 INFO [trainer.py:765] (7/8) Epoch 2, batch 1100, train_loss[loss=3.335, ArTop10Accuracy=0.6454, over 13818.00 frames. ], tot_loss[loss=3.362, ArTop10Accuracy=0.64, over 12003.56 frames. ], batch size: 34, lr: 2.81e-02 +2024-08-06 03:55:26,229 INFO [trainer.py:765] (7/8) Epoch 2, batch 1200, train_loss[loss=3.372, ArTop10Accuracy=0.6381, over 11813.00 frames. ], tot_loss[loss=3.356, ArTop10Accuracy=0.641, over 11933.31 frames. ], batch size: 97, lr: 2.80e-02 +2024-08-06 03:55:51,139 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 03:57:04,102 INFO [trainer.py:765] (7/8) Epoch 3, batch 100, train_loss[loss=3.371, ArTop10Accuracy=0.6374, over 14601.00 frames. ], tot_loss[loss=3.314, ArTop10Accuracy=0.6503, over 4779.56 frames. 
], batch size: 62, lr: 2.67e-02 +2024-08-06 03:57:50,980 INFO [trainer.py:765] (7/8) Epoch 3, batch 200, train_loss[loss=3.284, ArTop10Accuracy=0.6563, over 13693.00 frames. ], tot_loss[loss=3.296, ArTop10Accuracy=0.6538, over 7785.32 frames. ], batch size: 34, lr: 2.66e-02 +2024-08-06 03:58:26,075 INFO [trainer.py:765] (7/8) Epoch 3, batch 300, train_loss[loss=3.175, ArTop10Accuracy=0.6722, over 14066.00 frames. ], tot_loss[loss=3.281, ArTop10Accuracy=0.6564, over 9410.41 frames. ], batch size: 44, lr: 2.64e-02 +2024-08-06 03:59:11,254 INFO [trainer.py:765] (7/8) Epoch 3, batch 400, train_loss[loss=3.326, ArTop10Accuracy=0.6447, over 10334.00 frames. ], tot_loss[loss=3.27, ArTop10Accuracy=0.6584, over 10329.08 frames. ], batch size: 14, lr: 2.63e-02 +2024-08-06 03:59:29,675 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 8.720e+01 1.461e+02 1.775e+02 2.344e+02 9.150e+02, threshold=3.550e+02, percent-clipped=5.2 +2024-08-06 03:59:49,304 INFO [trainer.py:765] (7/8) Epoch 3, batch 500, train_loss[loss=3.121, ArTop10Accuracy=0.682, over 12193.00 frames. ], tot_loss[loss=3.255, ArTop10Accuracy=0.6604, over 10904.15 frames. ], batch size: 22, lr: 2.62e-02 +2024-08-06 04:00:35,096 INFO [trainer.py:765] (7/8) Epoch 3, batch 600, train_loss[loss=3.192, ArTop10Accuracy=0.6709, over 11744.00 frames. ], tot_loss[loss=3.241, ArTop10Accuracy=0.6632, over 11426.58 frames. ], batch size: 18, lr: 2.61e-02 +2024-08-06 04:01:22,059 INFO [trainer.py:765] (7/8) Epoch 3, batch 700, train_loss[loss=2.996, ArTop10Accuracy=0.7045, over 10156.00 frames. ], tot_loss[loss=3.238, ArTop10Accuracy=0.664, over 11562.24 frames. ], batch size: 12, lr: 2.60e-02 +2024-08-06 04:01:56,269 INFO [trainer.py:765] (7/8) Epoch 3, batch 800, train_loss[loss=3.181, ArTop10Accuracy=0.6888, over 9863.00 frames. ], tot_loss[loss=3.232, ArTop10Accuracy=0.6653, over 11678.21 frames. ], batch size: 12, lr: 2.59e-02 +2024-08-06 04:02:27,740 INFO [trainer.py:765] (7/8) Epoch 3, batch 900, train_loss[loss=3.18, ArTop10Accuracy=0.6827, over 12994.00 frames. ], tot_loss[loss=3.212, ArTop10Accuracy=0.6696, over 11732.46 frames. ], batch size: 27, lr: 2.57e-02 +2024-08-06 04:02:59,284 INFO [trainer.py:765] (7/8) Epoch 3, batch 1000, train_loss[loss=3.077, ArTop10Accuracy=0.7003, over 13318.00 frames. ], tot_loss[loss=3.197, ArTop10Accuracy=0.6723, over 11939.63 frames. ], batch size: 28, lr: 2.56e-02 +2024-08-06 04:03:30,942 INFO [trainer.py:765] (7/8) Epoch 3, batch 1100, train_loss[loss=3.118, ArTop10Accuracy=0.6931, over 13902.00 frames. ], tot_loss[loss=3.195, ArTop10Accuracy=0.6724, over 12001.98 frames. ], batch size: 34, lr: 2.55e-02 +2024-08-06 04:04:01,314 INFO [trainer.py:765] (7/8) Epoch 3, batch 1200, train_loss[loss=3.266, ArTop10Accuracy=0.6626, over 12006.00 frames. ], tot_loss[loss=3.18, ArTop10Accuracy=0.6756, over 11951.26 frames. ], batch size: 98, lr: 2.54e-02 +2024-08-06 04:04:26,531 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:05:43,369 INFO [trainer.py:765] (7/8) Epoch 4, batch 100, train_loss[loss=3.199, ArTop10Accuracy=0.6694, over 14536.00 frames. ], tot_loss[loss=3.132, ArTop10Accuracy=0.6861, over 4773.15 frames. ], batch size: 61, lr: 2.38e-02 +2024-08-06 04:06:07,077 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 04:06:16,404 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=3.063, ArTop10Accuracy=0.7031, over 1829298.00 frames. 
+2024-08-06 04:06:16,405 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29190MB +2024-08-06 04:06:16,746 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.091e+02 1.493e+02 1.709e+02 2.068e+02 7.969e+02, threshold=3.418e+02, percent-clipped=2.9 +2024-08-06 04:06:31,827 INFO [trainer.py:765] (7/8) Epoch 4, batch 200, train_loss[loss=3.208, ArTop10Accuracy=0.6659, over 13285.00 frames. ], tot_loss[loss=3.122, ArTop10Accuracy=0.6878, over 7793.19 frames. ], batch size: 33, lr: 2.37e-02 +2024-08-06 04:07:18,544 INFO [trainer.py:765] (7/8) Epoch 4, batch 300, train_loss[loss=3.184, ArTop10Accuracy=0.6809, over 14081.00 frames. ], tot_loss[loss=3.12, ArTop10Accuracy=0.6883, over 9423.25 frames. ], batch size: 44, lr: 2.36e-02 +2024-08-06 04:08:01,911 INFO [trainer.py:765] (7/8) Epoch 4, batch 400, train_loss[loss=3.047, ArTop10Accuracy=0.7063, over 10851.00 frames. ], tot_loss[loss=3.114, ArTop10Accuracy=0.6892, over 10352.23 frames. ], batch size: 15, lr: 2.34e-02 +2024-08-06 04:08:45,345 INFO [trainer.py:765] (7/8) Epoch 4, batch 500, train_loss[loss=3.135, ArTop10Accuracy=0.691, over 12305.00 frames. ], tot_loss[loss=3.107, ArTop10Accuracy=0.6901, over 10907.45 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 04:09:37,072 INFO [trainer.py:765] (7/8) Epoch 4, batch 600, train_loss[loss=3.077, ArTop10Accuracy=0.6936, over 11653.00 frames. ], tot_loss[loss=3.108, ArTop10Accuracy=0.6902, over 11427.22 frames. ], batch size: 18, lr: 2.32e-02 +2024-08-06 04:10:13,502 INFO [trainer.py:765] (7/8) Epoch 4, batch 700, train_loss[loss=2.914, ArTop10Accuracy=0.7315, over 10206.00 frames. ], tot_loss[loss=3.109, ArTop10Accuracy=0.69, over 11569.47 frames. ], batch size: 12, lr: 2.31e-02 +2024-08-06 04:10:51,960 INFO [trainer.py:765] (7/8) Epoch 4, batch 800, train_loss[loss=3.06, ArTop10Accuracy=0.7008, over 10219.00 frames. ], tot_loss[loss=3.11, ArTop10Accuracy=0.6898, over 11675.66 frames. ], batch size: 12, lr: 2.30e-02 +2024-08-06 04:11:23,334 INFO [trainer.py:765] (7/8) Epoch 4, batch 900, train_loss[loss=3.236, ArTop10Accuracy=0.6636, over 12924.00 frames. ], tot_loss[loss=3.1, ArTop10Accuracy=0.6916, over 11746.39 frames. ], batch size: 27, lr: 2.29e-02 +2024-08-06 04:11:54,827 INFO [trainer.py:765] (7/8) Epoch 4, batch 1000, train_loss[loss=3.025, ArTop10Accuracy=0.7075, over 13048.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6909, over 11936.66 frames. ], batch size: 27, lr: 2.28e-02 +2024-08-06 04:12:25,961 INFO [trainer.py:765] (7/8) Epoch 4, batch 1100, train_loss[loss=3.122, ArTop10Accuracy=0.6878, over 13697.00 frames. ], tot_loss[loss=3.105, ArTop10Accuracy=0.6906, over 11990.53 frames. ], batch size: 34, lr: 2.26e-02 +2024-08-06 04:12:48,545 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.106e+02 1.440e+02 1.608e+02 1.893e+02 7.925e+02, threshold=3.216e+02, percent-clipped=2.0 +2024-08-06 04:12:58,828 INFO [trainer.py:765] (7/8) Epoch 4, batch 1200, train_loss[loss=3.22, ArTop10Accuracy=0.6702, over 11768.00 frames. ], tot_loss[loss=3.1, ArTop10Accuracy=0.6917, over 11957.42 frames. ], batch size: 98, lr: 2.25e-02 +2024-08-06 04:13:23,901 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:14:38,685 INFO [trainer.py:765] (7/8) Epoch 5, batch 100, train_loss[loss=3.133, ArTop10Accuracy=0.686, over 14554.00 frames. ], tot_loss[loss=3.073, ArTop10Accuracy=0.697, over 4789.25 frames. 
], batch size: 61, lr: 2.10e-02 +2024-08-06 04:15:26,826 INFO [trainer.py:765] (7/8) Epoch 5, batch 200, train_loss[loss=3.021, ArTop10Accuracy=0.7045, over 13785.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7005, over 7796.18 frames. ], batch size: 34, lr: 2.09e-02 +2024-08-06 04:16:08,011 INFO [trainer.py:765] (7/8) Epoch 5, batch 300, train_loss[loss=3.121, ArTop10Accuracy=0.6867, over 14160.00 frames. ], tot_loss[loss=3.054, ArTop10Accuracy=0.701, over 9403.92 frames. ], batch size: 44, lr: 2.08e-02 +2024-08-06 04:16:53,134 INFO [trainer.py:765] (7/8) Epoch 5, batch 400, train_loss[loss=3.106, ArTop10Accuracy=0.6987, over 10195.00 frames. ], tot_loss[loss=3.055, ArTop10Accuracy=0.7011, over 10331.16 frames. ], batch size: 14, lr: 2.07e-02 +2024-08-06 04:17:36,638 INFO [trainer.py:765] (7/8) Epoch 5, batch 500, train_loss[loss=3.018, ArTop10Accuracy=0.7048, over 12351.00 frames. ], tot_loss[loss=3.053, ArTop10Accuracy=0.7015, over 10902.94 frames. ], batch size: 22, lr: 2.06e-02 +2024-08-06 04:18:22,114 INFO [trainer.py:765] (7/8) Epoch 5, batch 600, train_loss[loss=2.915, ArTop10Accuracy=0.7154, over 11679.00 frames. ], tot_loss[loss=3.051, ArTop10Accuracy=0.7017, over 11434.96 frames. ], batch size: 18, lr: 2.05e-02 +2024-08-06 04:19:17,033 INFO [trainer.py:765] (7/8) Epoch 5, batch 700, train_loss[loss=2.761, ArTop10Accuracy=0.7566, over 10083.00 frames. ], tot_loss[loss=3.056, ArTop10Accuracy=0.7005, over 11586.81 frames. ], batch size: 12, lr: 2.04e-02 +2024-08-06 04:19:51,067 INFO [trainer.py:765] (7/8) Epoch 5, batch 800, train_loss[loss=3.05, ArTop10Accuracy=0.707, over 9901.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6994, over 11699.42 frames. ], batch size: 12, lr: 2.03e-02 +2024-08-06 04:20:18,215 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 04:20:27,476 INFO [trainer.py:811] (7/8) Epoch 5, validation: loss=2.998, ArTop10Accuracy=0.7157, over 1829298.00 frames. +2024-08-06 04:20:27,476 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32945MB +2024-08-06 04:20:27,781 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.057e+02 1.385e+02 1.542e+02 1.759e+02 7.741e+02, threshold=3.083e+02, percent-clipped=0.7 +2024-08-06 04:20:31,767 INFO [trainer.py:765] (7/8) Epoch 5, batch 900, train_loss[loss=3.11, ArTop10Accuracy=0.6938, over 13011.00 frames. ], tot_loss[loss=3.06, ArTop10Accuracy=0.6998, over 11721.33 frames. ], batch size: 27, lr: 2.02e-02 +2024-08-06 04:21:03,306 INFO [trainer.py:765] (7/8) Epoch 5, batch 1000, train_loss[loss=3.075, ArTop10Accuracy=0.7002, over 12775.00 frames. ], tot_loss[loss=3.061, ArTop10Accuracy=0.6997, over 11923.59 frames. ], batch size: 27, lr: 2.01e-02 +2024-08-06 04:21:34,453 INFO [trainer.py:765] (7/8) Epoch 5, batch 1100, train_loss[loss=3.032, ArTop10Accuracy=0.7089, over 13742.00 frames. ], tot_loss[loss=3.059, ArTop10Accuracy=0.7, over 11985.07 frames. ], batch size: 34, lr: 2.00e-02 +2024-08-06 04:22:04,752 INFO [trainer.py:765] (7/8) Epoch 5, batch 1200, train_loss[loss=3.175, ArTop10Accuracy=0.6769, over 12739.00 frames. ], tot_loss[loss=3.058, ArTop10Accuracy=0.7004, over 11931.24 frames. ], batch size: 97, lr: 1.99e-02 +2024-08-06 04:22:30,466 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:23:46,282 INFO [trainer.py:765] (7/8) Epoch 6, batch 100, train_loss[loss=3.137, ArTop10Accuracy=0.6858, over 14525.00 frames. ], tot_loss[loss=3.028, ArTop10Accuracy=0.7069, over 4764.51 frames. 
], batch size: 62, lr: 1.85e-02 +2024-08-06 04:24:35,255 INFO [trainer.py:765] (7/8) Epoch 6, batch 200, train_loss[loss=3.05, ArTop10Accuracy=0.7099, over 13371.00 frames. ], tot_loss[loss=3.019, ArTop10Accuracy=0.709, over 7788.62 frames. ], batch size: 34, lr: 1.84e-02 +2024-08-06 04:25:16,676 INFO [trainer.py:765] (7/8) Epoch 6, batch 300, train_loss[loss=3.058, ArTop10Accuracy=0.7028, over 14106.00 frames. ], tot_loss[loss=3.013, ArTop10Accuracy=0.7099, over 9415.75 frames. ], batch size: 44, lr: 1.83e-02 +2024-08-06 04:26:08,924 INFO [trainer.py:765] (7/8) Epoch 6, batch 400, train_loss[loss=3.02, ArTop10Accuracy=0.7105, over 10473.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.7102, over 10331.60 frames. ], batch size: 14, lr: 1.83e-02 +2024-08-06 04:26:51,485 INFO [trainer.py:765] (7/8) Epoch 6, batch 500, train_loss[loss=2.853, ArTop10Accuracy=0.7278, over 12190.00 frames. ], tot_loss[loss=3.009, ArTop10Accuracy=0.7097, over 10890.24 frames. ], batch size: 22, lr: 1.82e-02 +2024-08-06 04:27:39,298 INFO [trainer.py:765] (7/8) Epoch 6, batch 600, train_loss[loss=3.056, ArTop10Accuracy=0.7117, over 11631.00 frames. ], tot_loss[loss=3.011, ArTop10Accuracy=0.709, over 11412.90 frames. ], batch size: 18, lr: 1.81e-02 +2024-08-06 04:27:46,369 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.054e+02 1.343e+02 1.474e+02 1.660e+02 8.574e+02, threshold=2.947e+02, percent-clipped=0.6 +2024-08-06 04:28:33,240 INFO [trainer.py:765] (7/8) Epoch 6, batch 700, train_loss[loss=2.941, ArTop10Accuracy=0.7306, over 10133.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7074, over 11568.97 frames. ], batch size: 12, lr: 1.80e-02 +2024-08-06 04:29:11,216 INFO [trainer.py:765] (7/8) Epoch 6, batch 800, train_loss[loss=2.968, ArTop10Accuracy=0.7185, over 10059.00 frames. ], tot_loss[loss=3.025, ArTop10Accuracy=0.7067, over 11665.19 frames. ], batch size: 12, lr: 1.79e-02 +2024-08-06 04:29:42,751 INFO [trainer.py:765] (7/8) Epoch 6, batch 900, train_loss[loss=3.023, ArTop10Accuracy=0.7067, over 13535.00 frames. ], tot_loss[loss=3.024, ArTop10Accuracy=0.7071, over 11723.71 frames. ], batch size: 28, lr: 1.78e-02 +2024-08-06 04:30:14,306 INFO [trainer.py:765] (7/8) Epoch 6, batch 1000, train_loss[loss=2.997, ArTop10Accuracy=0.7111, over 12926.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7072, over 11907.02 frames. ], batch size: 27, lr: 1.77e-02 +2024-08-06 04:30:45,384 INFO [trainer.py:765] (7/8) Epoch 6, batch 1100, train_loss[loss=3.086, ArTop10Accuracy=0.6944, over 13694.00 frames. ], tot_loss[loss=3.021, ArTop10Accuracy=0.7074, over 11972.20 frames. ], batch size: 34, lr: 1.77e-02 +2024-08-06 04:31:15,673 INFO [trainer.py:765] (7/8) Epoch 6, batch 1200, train_loss[loss=3.2, ArTop10Accuracy=0.6732, over 12349.00 frames. ], tot_loss[loss=3.023, ArTop10Accuracy=0.7069, over 11929.01 frames. ], batch size: 97, lr: 1.76e-02 +2024-08-06 04:31:40,504 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:32:52,405 INFO [trainer.py:765] (7/8) Epoch 7, batch 100, train_loss[loss=3.005, ArTop10Accuracy=0.7082, over 14471.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7128, over 4777.01 frames. ], batch size: 61, lr: 1.64e-02 +2024-08-06 04:33:38,223 INFO [trainer.py:765] (7/8) Epoch 7, batch 200, train_loss[loss=2.982, ArTop10Accuracy=0.7117, over 13792.00 frames. ], tot_loss[loss=2.994, ArTop10Accuracy=0.7141, over 7788.20 frames. 
], batch size: 34, lr: 1.64e-02 +2024-08-06 04:34:22,609 INFO [trainer.py:765] (7/8) Epoch 7, batch 300, train_loss[loss=3.132, ArTop10Accuracy=0.6902, over 14476.00 frames. ], tot_loss[loss=2.99, ArTop10Accuracy=0.7145, over 9408.60 frames. ], batch size: 44, lr: 1.63e-02 +2024-08-06 04:34:36,848 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 04:34:45,809 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=2.963, ArTop10Accuracy=0.7233, over 1829298.00 frames. +2024-08-06 04:34:45,810 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32945MB +2024-08-06 04:34:46,125 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.009e+02 1.306e+02 1.435e+02 1.599e+02 8.689e+02, threshold=2.871e+02, percent-clipped=0.9 +2024-08-06 04:35:17,146 INFO [trainer.py:765] (7/8) Epoch 7, batch 400, train_loss[loss=2.922, ArTop10Accuracy=0.7247, over 10307.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7148, over 10321.44 frames. ], batch size: 14, lr: 1.62e-02 +2024-08-06 04:36:01,711 INFO [trainer.py:765] (7/8) Epoch 7, batch 500, train_loss[loss=3.02, ArTop10Accuracy=0.706, over 12451.00 frames. ], tot_loss[loss=2.987, ArTop10Accuracy=0.7146, over 10901.64 frames. ], batch size: 22, lr: 1.61e-02 +2024-08-06 04:36:48,811 INFO [trainer.py:765] (7/8) Epoch 7, batch 600, train_loss[loss=2.976, ArTop10Accuracy=0.7147, over 11378.00 frames. ], tot_loss[loss=2.992, ArTop10Accuracy=0.7136, over 11426.53 frames. ], batch size: 18, lr: 1.61e-02 +2024-08-06 04:37:34,800 INFO [trainer.py:765] (7/8) Epoch 7, batch 700, train_loss[loss=2.91, ArTop10Accuracy=0.7316, over 9960.00 frames. ], tot_loss[loss=2.994, ArTop10Accuracy=0.7128, over 11559.44 frames. ], batch size: 12, lr: 1.60e-02 +2024-08-06 04:38:13,614 INFO [trainer.py:765] (7/8) Epoch 7, batch 800, train_loss[loss=2.98, ArTop10Accuracy=0.7089, over 9939.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.712, over 11680.97 frames. ], batch size: 12, lr: 1.59e-02 +2024-08-06 04:38:45,111 INFO [trainer.py:765] (7/8) Epoch 7, batch 900, train_loss[loss=2.98, ArTop10Accuracy=0.7173, over 13145.00 frames. ], tot_loss[loss=2.989, ArTop10Accuracy=0.7137, over 11735.27 frames. ], batch size: 27, lr: 1.59e-02 +2024-08-06 04:39:16,576 INFO [trainer.py:765] (7/8) Epoch 7, batch 1000, train_loss[loss=3.062, ArTop10Accuracy=0.697, over 12817.00 frames. ], tot_loss[loss=2.988, ArTop10Accuracy=0.7139, over 11930.57 frames. ], batch size: 27, lr: 1.58e-02 +2024-08-06 04:39:47,572 INFO [trainer.py:765] (7/8) Epoch 7, batch 1100, train_loss[loss=2.984, ArTop10Accuracy=0.7187, over 13764.00 frames. ], tot_loss[loss=2.998, ArTop10Accuracy=0.7119, over 11985.69 frames. ], batch size: 34, lr: 1.57e-02 +2024-08-06 04:40:17,990 INFO [trainer.py:765] (7/8) Epoch 7, batch 1200, train_loss[loss=3.153, ArTop10Accuracy=0.6813, over 12201.00 frames. ], tot_loss[loss=2.997, ArTop10Accuracy=0.7123, over 11940.08 frames. ], batch size: 97, lr: 1.57e-02 +2024-08-06 04:40:43,324 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:41:37,492 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 9.816e+01 1.295e+02 1.411e+02 1.574e+02 4.953e+02, threshold=2.821e+02, percent-clipped=1.1 +2024-08-06 04:41:58,371 INFO [trainer.py:765] (7/8) Epoch 8, batch 100, train_loss[loss=2.961, ArTop10Accuracy=0.7196, over 14413.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7179, over 4806.07 frames. 
], batch size: 61, lr: 1.47e-02 +2024-08-06 04:42:44,986 INFO [trainer.py:765] (7/8) Epoch 8, batch 200, train_loss[loss=3.018, ArTop10Accuracy=0.7182, over 13428.00 frames. ], tot_loss[loss=2.967, ArTop10Accuracy=0.7192, over 7816.68 frames. ], batch size: 34, lr: 1.46e-02 +2024-08-06 04:43:28,045 INFO [trainer.py:765] (7/8) Epoch 8, batch 300, train_loss[loss=3.064, ArTop10Accuracy=0.6965, over 14189.00 frames. ], tot_loss[loss=2.957, ArTop10Accuracy=0.721, over 9440.02 frames. ], batch size: 44, lr: 1.46e-02 +2024-08-06 04:44:14,462 INFO [trainer.py:765] (7/8) Epoch 8, batch 400, train_loss[loss=2.895, ArTop10Accuracy=0.7349, over 10381.00 frames. ], tot_loss[loss=2.958, ArTop10Accuracy=0.7208, over 10338.67 frames. ], batch size: 14, lr: 1.45e-02 +2024-08-06 04:45:00,692 INFO [trainer.py:765] (7/8) Epoch 8, batch 500, train_loss[loss=2.869, ArTop10Accuracy=0.7261, over 12318.00 frames. ], tot_loss[loss=2.956, ArTop10Accuracy=0.7209, over 10903.17 frames. ], batch size: 22, lr: 1.45e-02 +2024-08-06 04:45:45,393 INFO [trainer.py:765] (7/8) Epoch 8, batch 600, train_loss[loss=2.875, ArTop10Accuracy=0.7403, over 11602.00 frames. ], tot_loss[loss=2.962, ArTop10Accuracy=0.7196, over 11414.72 frames. ], batch size: 18, lr: 1.44e-02 +2024-08-06 04:46:34,038 INFO [trainer.py:765] (7/8) Epoch 8, batch 700, train_loss[loss=2.968, ArTop10Accuracy=0.7134, over 10095.00 frames. ], tot_loss[loss=2.972, ArTop10Accuracy=0.7179, over 11574.87 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:10,208 INFO [trainer.py:765] (7/8) Epoch 8, batch 800, train_loss[loss=2.839, ArTop10Accuracy=0.7445, over 10143.00 frames. ], tot_loss[loss=2.974, ArTop10Accuracy=0.7172, over 11683.39 frames. ], batch size: 12, lr: 1.43e-02 +2024-08-06 04:47:41,605 INFO [trainer.py:765] (7/8) Epoch 8, batch 900, train_loss[loss=2.96, ArTop10Accuracy=0.718, over 12929.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7195, over 11725.69 frames. ], batch size: 27, lr: 1.42e-02 +2024-08-06 04:48:13,032 INFO [trainer.py:765] (7/8) Epoch 8, batch 1000, train_loss[loss=2.924, ArTop10Accuracy=0.7384, over 13394.00 frames. ], tot_loss[loss=2.971, ArTop10Accuracy=0.7176, over 11930.37 frames. ], batch size: 28, lr: 1.42e-02 +2024-08-06 04:48:28,827 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 04:48:37,663 INFO [trainer.py:811] (7/8) Epoch 8, validation: loss=2.946, ArTop10Accuracy=0.7266, over 1829298.00 frames. +2024-08-06 04:48:37,664 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32945MB +2024-08-06 04:48:37,951 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.035e+02 1.289e+02 1.393e+02 1.532e+02 3.557e+02, threshold=2.786e+02, percent-clipped=0.2 +2024-08-06 04:48:52,932 INFO [trainer.py:765] (7/8) Epoch 8, batch 1100, train_loss[loss=2.9, ArTop10Accuracy=0.7299, over 14090.00 frames. ], tot_loss[loss=2.98, ArTop10Accuracy=0.716, over 11976.62 frames. ], batch size: 34, lr: 1.41e-02 +2024-08-06 04:49:23,202 INFO [trainer.py:765] (7/8) Epoch 8, batch 1200, train_loss[loss=3.102, ArTop10Accuracy=0.6899, over 12060.00 frames. ], tot_loss[loss=2.977, ArTop10Accuracy=0.7162, over 11934.38 frames. ], batch size: 98, lr: 1.40e-02 +2024-08-06 04:49:48,360 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:51:01,547 INFO [trainer.py:765] (7/8) Epoch 9, batch 100, train_loss[loss=3.039, ArTop10Accuracy=0.7035, over 14529.00 frames. ], tot_loss[loss=2.939, ArTop10Accuracy=0.7243, over 4770.76 frames. 
], batch size: 61, lr: 1.32e-02 +2024-08-06 04:51:45,414 INFO [trainer.py:765] (7/8) Epoch 9, batch 200, train_loss[loss=2.903, ArTop10Accuracy=0.7298, over 13628.00 frames. ], tot_loss[loss=2.935, ArTop10Accuracy=0.7257, over 7790.94 frames. ], batch size: 34, lr: 1.32e-02 +2024-08-06 04:52:29,082 INFO [trainer.py:765] (7/8) Epoch 9, batch 300, train_loss[loss=2.973, ArTop10Accuracy=0.7197, over 14170.00 frames. ], tot_loss[loss=2.934, ArTop10Accuracy=0.7255, over 9409.58 frames. ], batch size: 44, lr: 1.31e-02 +2024-08-06 04:53:16,431 INFO [trainer.py:765] (7/8) Epoch 9, batch 400, train_loss[loss=2.927, ArTop10Accuracy=0.7286, over 10517.00 frames. ], tot_loss[loss=2.933, ArTop10Accuracy=0.7258, over 10308.35 frames. ], batch size: 14, lr: 1.31e-02 +2024-08-06 04:53:58,143 INFO [trainer.py:765] (7/8) Epoch 9, batch 500, train_loss[loss=2.974, ArTop10Accuracy=0.7274, over 12342.00 frames. ], tot_loss[loss=2.94, ArTop10Accuracy=0.7238, over 10883.91 frames. ], batch size: 22, lr: 1.30e-02 +2024-08-06 04:54:51,077 INFO [trainer.py:765] (7/8) Epoch 9, batch 600, train_loss[loss=2.987, ArTop10Accuracy=0.7169, over 11449.00 frames. ], tot_loss[loss=2.947, ArTop10Accuracy=0.7225, over 11422.54 frames. ], batch size: 18, lr: 1.30e-02 +2024-08-06 04:55:34,399 INFO [trainer.py:765] (7/8) Epoch 9, batch 700, train_loss[loss=2.88, ArTop10Accuracy=0.7346, over 10007.00 frames. ], tot_loss[loss=2.949, ArTop10Accuracy=0.7221, over 11559.69 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:04,575 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.029e+02 1.257e+02 1.367e+02 1.507e+02 8.820e+02, threshold=2.735e+02, percent-clipped=0.5 +2024-08-06 04:56:13,597 INFO [trainer.py:765] (7/8) Epoch 9, batch 800, train_loss[loss=2.917, ArTop10Accuracy=0.7269, over 10347.00 frames. ], tot_loss[loss=2.954, ArTop10Accuracy=0.7211, over 11670.55 frames. ], batch size: 12, lr: 1.29e-02 +2024-08-06 04:56:44,975 INFO [trainer.py:765] (7/8) Epoch 9, batch 900, train_loss[loss=2.797, ArTop10Accuracy=0.7471, over 13184.00 frames. ], tot_loss[loss=2.952, ArTop10Accuracy=0.7214, over 11717.30 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:16,491 INFO [trainer.py:765] (7/8) Epoch 9, batch 1000, train_loss[loss=2.87, ArTop10Accuracy=0.7419, over 12925.00 frames. ], tot_loss[loss=2.959, ArTop10Accuracy=0.7203, over 11922.27 frames. ], batch size: 27, lr: 1.28e-02 +2024-08-06 04:57:47,657 INFO [trainer.py:765] (7/8) Epoch 9, batch 1100, train_loss[loss=2.933, ArTop10Accuracy=0.7229, over 13975.00 frames. ], tot_loss[loss=2.963, ArTop10Accuracy=0.7195, over 11978.59 frames. ], batch size: 35, lr: 1.27e-02 +2024-08-06 04:58:18,094 INFO [trainer.py:765] (7/8) Epoch 9, batch 1200, train_loss[loss=3.094, ArTop10Accuracy=0.6954, over 11733.00 frames. ], tot_loss[loss=2.96, ArTop10Accuracy=0.7199, over 11924.05 frames. ], batch size: 99, lr: 1.27e-02 +2024-08-06 04:58:43,766 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 04:59:52,749 INFO [trainer.py:765] (7/8) Epoch 10, batch 100, train_loss[loss=2.94, ArTop10Accuracy=0.7291, over 14618.00 frames. ], tot_loss[loss=2.932, ArTop10Accuracy=0.7274, over 4805.03 frames. ], batch size: 61, lr: 1.20e-02 +2024-08-06 05:00:43,730 INFO [trainer.py:765] (7/8) Epoch 10, batch 200, train_loss[loss=3.014, ArTop10Accuracy=0.7121, over 13804.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.7279, over 7796.69 frames. 
], batch size: 34, lr: 1.20e-02 +2024-08-06 05:01:20,592 INFO [trainer.py:765] (7/8) Epoch 10, batch 300, train_loss[loss=3.029, ArTop10Accuracy=0.7081, over 13831.00 frames. ], tot_loss[loss=2.927, ArTop10Accuracy=0.7273, over 9427.24 frames. ], batch size: 44, lr: 1.19e-02 +2024-08-06 05:02:10,048 INFO [trainer.py:765] (7/8) Epoch 10, batch 400, train_loss[loss=2.787, ArTop10Accuracy=0.7534, over 10233.00 frames. ], tot_loss[loss=2.925, ArTop10Accuracy=0.7278, over 10324.53 frames. ], batch size: 14, lr: 1.19e-02 +2024-08-06 05:02:46,488 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 05:02:55,377 INFO [trainer.py:811] (7/8) Epoch 10, validation: loss=2.927, ArTop10Accuracy=0.7304, over 1829298.00 frames. +2024-08-06 05:02:55,378 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32945MB +2024-08-06 05:02:55,728 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.023e+02 1.269e+02 1.367e+02 1.518e+02 4.405e+02, threshold=2.733e+02, percent-clipped=0.4 +2024-08-06 05:02:58,361 INFO [trainer.py:765] (7/8) Epoch 10, batch 500, train_loss[loss=2.93, ArTop10Accuracy=0.7277, over 12306.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7279, over 10904.75 frames. ], batch size: 22, lr: 1.19e-02 +2024-08-06 05:03:48,229 INFO [trainer.py:765] (7/8) Epoch 10, batch 600, train_loss[loss=2.851, ArTop10Accuracy=0.7395, over 11544.00 frames. ], tot_loss[loss=2.92, ArTop10Accuracy=0.7277, over 11443.00 frames. ], batch size: 18, lr: 1.18e-02 +2024-08-06 05:04:36,716 INFO [trainer.py:765] (7/8) Epoch 10, batch 700, train_loss[loss=2.832, ArTop10Accuracy=0.739, over 9305.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7271, over 11578.54 frames. ], batch size: 11, lr: 1.18e-02 +2024-08-06 05:05:10,725 INFO [trainer.py:765] (7/8) Epoch 10, batch 800, train_loss[loss=2.897, ArTop10Accuracy=0.7411, over 9992.00 frames. ], tot_loss[loss=2.931, ArTop10Accuracy=0.7256, over 11678.93 frames. ], batch size: 12, lr: 1.17e-02 +2024-08-06 05:05:42,245 INFO [trainer.py:765] (7/8) Epoch 10, batch 900, train_loss[loss=2.902, ArTop10Accuracy=0.7292, over 13011.00 frames. ], tot_loss[loss=2.924, ArTop10Accuracy=0.7268, over 11719.54 frames. ], batch size: 27, lr: 1.17e-02 +2024-08-06 05:06:13,843 INFO [trainer.py:765] (7/8) Epoch 10, batch 1000, train_loss[loss=2.879, ArTop10Accuracy=0.7321, over 12733.00 frames. ], tot_loss[loss=2.928, ArTop10Accuracy=0.726, over 11953.11 frames. ], batch size: 27, lr: 1.16e-02 +2024-08-06 05:06:45,055 INFO [trainer.py:765] (7/8) Epoch 10, batch 1100, train_loss[loss=2.942, ArTop10Accuracy=0.719, over 13572.00 frames. ], tot_loss[loss=2.937, ArTop10Accuracy=0.7246, over 12003.87 frames. ], batch size: 34, lr: 1.16e-02 +2024-08-06 05:07:15,483 INFO [trainer.py:765] (7/8) Epoch 10, batch 1200, train_loss[loss=3.163, ArTop10Accuracy=0.6777, over 12281.00 frames. ], tot_loss[loss=2.938, ArTop10Accuracy=0.7245, over 11927.34 frames. ], batch size: 97, lr: 1.16e-02 +2024-08-06 05:07:40,800 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:08:52,967 INFO [trainer.py:765] (7/8) Epoch 11, batch 100, train_loss[loss=2.933, ArTop10Accuracy=0.723, over 14427.00 frames. ], tot_loss[loss=2.911, ArTop10Accuracy=0.7303, over 4794.99 frames. ], batch size: 61, lr: 1.10e-02 +2024-08-06 05:09:41,278 INFO [trainer.py:765] (7/8) Epoch 11, batch 200, train_loss[loss=3.016, ArTop10Accuracy=0.71, over 13705.00 frames. ], tot_loss[loss=2.906, ArTop10Accuracy=0.7316, over 7799.16 frames. 
], batch size: 34, lr: 1.10e-02 +2024-08-06 05:09:51,176 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.001e+02 1.278e+02 1.371e+02 1.502e+02 3.785e+02, threshold=2.743e+02, percent-clipped=0.3 +2024-08-06 05:10:24,721 INFO [trainer.py:765] (7/8) Epoch 11, batch 300, train_loss[loss=3.012, ArTop10Accuracy=0.7111, over 14181.00 frames. ], tot_loss[loss=2.908, ArTop10Accuracy=0.7309, over 9412.18 frames. ], batch size: 44, lr: 1.09e-02 +2024-08-06 05:11:11,784 INFO [trainer.py:765] (7/8) Epoch 11, batch 400, train_loss[loss=2.876, ArTop10Accuracy=0.7361, over 11002.00 frames. ], tot_loss[loss=2.909, ArTop10Accuracy=0.7308, over 10335.77 frames. ], batch size: 15, lr: 1.09e-02 +2024-08-06 05:11:52,692 INFO [trainer.py:765] (7/8) Epoch 11, batch 500, train_loss[loss=2.877, ArTop10Accuracy=0.7305, over 12402.00 frames. ], tot_loss[loss=2.902, ArTop10Accuracy=0.732, over 10917.64 frames. ], batch size: 22, lr: 1.09e-02 +2024-08-06 05:12:40,288 INFO [trainer.py:765] (7/8) Epoch 11, batch 600, train_loss[loss=2.898, ArTop10Accuracy=0.7318, over 11560.00 frames. ], tot_loss[loss=2.903, ArTop10Accuracy=0.7313, over 11450.34 frames. ], batch size: 18, lr: 1.08e-02 +2024-08-06 05:13:25,709 INFO [trainer.py:765] (7/8) Epoch 11, batch 700, train_loss[loss=2.809, ArTop10Accuracy=0.7486, over 10109.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7297, over 11582.10 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 05:14:04,206 INFO [trainer.py:765] (7/8) Epoch 11, batch 800, train_loss[loss=2.855, ArTop10Accuracy=0.737, over 9140.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7295, over 11680.97 frames. ], batch size: 11, lr: 1.07e-02 +2024-08-06 05:14:35,667 INFO [trainer.py:765] (7/8) Epoch 11, batch 900, train_loss[loss=2.956, ArTop10Accuracy=0.7271, over 12898.00 frames. ], tot_loss[loss=2.904, ArTop10Accuracy=0.7314, over 11739.77 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:07,264 INFO [trainer.py:765] (7/8) Epoch 11, batch 1000, train_loss[loss=2.996, ArTop10Accuracy=0.7159, over 12925.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7297, over 11935.65 frames. ], batch size: 27, lr: 1.07e-02 +2024-08-06 05:15:38,260 INFO [trainer.py:765] (7/8) Epoch 11, batch 1100, train_loss[loss=2.956, ArTop10Accuracy=0.7185, over 13898.00 frames. ], tot_loss[loss=2.921, ArTop10Accuracy=0.7277, over 12000.99 frames. ], batch size: 35, lr: 1.06e-02 +2024-08-06 05:16:08,498 INFO [trainer.py:765] (7/8) Epoch 11, batch 1200, train_loss[loss=3.11, ArTop10Accuracy=0.6932, over 12329.00 frames. ], tot_loss[loss=2.923, ArTop10Accuracy=0.7273, over 11935.02 frames. ], batch size: 97, lr: 1.06e-02 +2024-08-06 05:16:12,697 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 05:16:21,622 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=2.923, ArTop10Accuracy=0.7318, over 1829298.00 frames. +2024-08-06 05:16:21,623 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 32945MB +2024-08-06 05:16:21,949 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.076e+02 1.268e+02 1.368e+02 1.481e+02 4.790e+02, threshold=2.736e+02, percent-clipped=0.6 +2024-08-06 05:16:42,778 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:18:03,005 INFO [trainer.py:765] (7/8) Epoch 12, batch 100, train_loss[loss=2.937, ArTop10Accuracy=0.724, over 14685.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7355, over 4788.97 frames. 
], batch size: 61, lr: 1.01e-02 +2024-08-06 05:18:46,004 INFO [trainer.py:765] (7/8) Epoch 12, batch 200, train_loss[loss=2.843, ArTop10Accuracy=0.7439, over 13985.00 frames. ], tot_loss[loss=2.884, ArTop10Accuracy=0.7357, over 7789.85 frames. ], batch size: 34, lr: 1.01e-02 +2024-08-06 05:19:31,946 INFO [trainer.py:765] (7/8) Epoch 12, batch 300, train_loss[loss=2.984, ArTop10Accuracy=0.7123, over 14488.00 frames. ], tot_loss[loss=2.883, ArTop10Accuracy=0.7359, over 9409.31 frames. ], batch size: 45, lr: 1.01e-02 +2024-08-06 05:20:12,430 INFO [trainer.py:765] (7/8) Epoch 12, batch 400, train_loss[loss=2.722, ArTop10Accuracy=0.7559, over 10436.00 frames. ], tot_loss[loss=2.882, ArTop10Accuracy=0.7353, over 10324.29 frames. ], batch size: 14, lr: 1.00e-02 +2024-08-06 05:21:00,640 INFO [trainer.py:765] (7/8) Epoch 12, batch 500, train_loss[loss=2.804, ArTop10Accuracy=0.7499, over 12250.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7365, over 10895.36 frames. ], batch size: 22, lr: 9.99e-03 +2024-08-06 05:21:43,915 INFO [trainer.py:765] (7/8) Epoch 12, batch 600, train_loss[loss=2.829, ArTop10Accuracy=0.7489, over 11619.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7353, over 11433.62 frames. ], batch size: 18, lr: 9.96e-03 +2024-08-06 05:22:32,207 INFO [trainer.py:765] (7/8) Epoch 12, batch 700, train_loss[loss=2.936, ArTop10Accuracy=0.7328, over 10181.00 frames. ], tot_loss[loss=2.887, ArTop10Accuracy=0.7342, over 11563.75 frames. ], batch size: 12, lr: 9.93e-03 +2024-08-06 05:23:08,911 INFO [trainer.py:765] (7/8) Epoch 12, batch 800, train_loss[loss=2.758, ArTop10Accuracy=0.7528, over 9162.00 frames. ], tot_loss[loss=2.898, ArTop10Accuracy=0.7319, over 11669.36 frames. ], batch size: 11, lr: 9.90e-03 +2024-08-06 05:23:40,460 INFO [trainer.py:765] (7/8) Epoch 12, batch 900, train_loss[loss=2.834, ArTop10Accuracy=0.7366, over 12971.00 frames. ], tot_loss[loss=2.892, ArTop10Accuracy=0.7334, over 11729.80 frames. ], batch size: 27, lr: 9.87e-03 +2024-08-06 05:23:54,576 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.067e+02 1.273e+02 1.376e+02 1.503e+02 4.050e+02, threshold=2.752e+02, percent-clipped=0.4 +2024-08-06 05:24:14,346 INFO [trainer.py:765] (7/8) Epoch 12, batch 1000, train_loss[loss=2.917, ArTop10Accuracy=0.7296, over 13072.00 frames. ], tot_loss[loss=2.9, ArTop10Accuracy=0.732, over 11918.96 frames. ], batch size: 27, lr: 9.84e-03 +2024-08-06 05:24:45,504 INFO [trainer.py:765] (7/8) Epoch 12, batch 1100, train_loss[loss=2.956, ArTop10Accuracy=0.7238, over 13653.00 frames. ], tot_loss[loss=2.912, ArTop10Accuracy=0.7296, over 11992.21 frames. ], batch size: 34, lr: 9.81e-03 +2024-08-06 05:25:15,882 INFO [trainer.py:765] (7/8) Epoch 12, batch 1200, train_loss[loss=3.062, ArTop10Accuracy=0.6972, over 11811.00 frames. ], tot_loss[loss=2.913, ArTop10Accuracy=0.7297, over 11939.20 frames. ], batch size: 97, lr: 9.78e-03 +2024-08-06 05:25:41,190 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:26:46,787 INFO [trainer.py:765] (7/8) Epoch 13, batch 100, train_loss[loss=2.928, ArTop10Accuracy=0.7275, over 14297.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7376, over 4779.82 frames. ], batch size: 61, lr: 9.36e-03 +2024-08-06 05:27:32,553 INFO [trainer.py:765] (7/8) Epoch 13, batch 200, train_loss[loss=2.95, ArTop10Accuracy=0.7271, over 13915.00 frames. ], tot_loss[loss=2.869, ArTop10Accuracy=0.7389, over 7787.10 frames. 
], batch size: 34, lr: 9.34e-03 +2024-08-06 05:28:16,036 INFO [trainer.py:765] (7/8) Epoch 13, batch 300, train_loss[loss=3.065, ArTop10Accuracy=0.7023, over 14336.00 frames. ], tot_loss[loss=2.863, ArTop10Accuracy=0.7397, over 9410.31 frames. ], batch size: 44, lr: 9.31e-03 +2024-08-06 05:29:00,149 INFO [trainer.py:765] (7/8) Epoch 13, batch 400, train_loss[loss=2.885, ArTop10Accuracy=0.732, over 10352.00 frames. ], tot_loss[loss=2.862, ArTop10Accuracy=0.7392, over 10334.89 frames. ], batch size: 14, lr: 9.28e-03 +2024-08-06 05:29:43,967 INFO [trainer.py:765] (7/8) Epoch 13, batch 500, train_loss[loss=2.745, ArTop10Accuracy=0.7553, over 12225.00 frames. ], tot_loss[loss=2.865, ArTop10Accuracy=0.7388, over 10899.49 frames. ], batch size: 22, lr: 9.26e-03 +2024-08-06 05:30:24,247 INFO [trainer.py:765] (7/8) Epoch 13, batch 600, train_loss[loss=2.854, ArTop10Accuracy=0.7456, over 11463.00 frames. ], tot_loss[loss=2.874, ArTop10Accuracy=0.7369, over 11438.36 frames. ], batch size: 18, lr: 9.23e-03 +2024-08-06 05:30:58,110 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 05:31:07,054 INFO [trainer.py:811] (7/8) Epoch 13, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 05:31:07,054 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33330MB +2024-08-06 05:31:07,351 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.049e+02 1.283e+02 1.389e+02 1.496e+02 2.729e+02, threshold=2.779e+02, percent-clipped=0.0 +2024-08-06 05:31:24,043 INFO [trainer.py:765] (7/8) Epoch 13, batch 700, train_loss[loss=2.72, ArTop10Accuracy=0.7559, over 10121.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7358, over 11564.55 frames. ], batch size: 12, lr: 9.20e-03 +2024-08-06 05:32:00,147 INFO [trainer.py:765] (7/8) Epoch 13, batch 800, train_loss[loss=2.683, ArTop10Accuracy=0.7698, over 10204.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7357, over 11662.77 frames. ], batch size: 12, lr: 9.18e-03 +2024-08-06 05:32:31,521 INFO [trainer.py:765] (7/8) Epoch 13, batch 900, train_loss[loss=2.813, ArTop10Accuracy=0.7501, over 13011.00 frames. ], tot_loss[loss=2.877, ArTop10Accuracy=0.7366, over 11735.97 frames. ], batch size: 27, lr: 9.15e-03 +2024-08-06 05:33:03,043 INFO [trainer.py:765] (7/8) Epoch 13, batch 1000, train_loss[loss=2.809, ArTop10Accuracy=0.7527, over 12993.00 frames. ], tot_loss[loss=2.886, ArTop10Accuracy=0.7348, over 11942.54 frames. ], batch size: 27, lr: 9.13e-03 +2024-08-06 05:33:34,232 INFO [trainer.py:765] (7/8) Epoch 13, batch 1100, train_loss[loss=3.034, ArTop10Accuracy=0.7081, over 13594.00 frames. ], tot_loss[loss=2.893, ArTop10Accuracy=0.7332, over 12002.91 frames. ], batch size: 34, lr: 9.10e-03 +2024-08-06 05:34:04,519 INFO [trainer.py:765] (7/8) Epoch 13, batch 1200, train_loss[loss=2.917, ArTop10Accuracy=0.7239, over 12703.00 frames. ], tot_loss[loss=2.891, ArTop10Accuracy=0.7337, over 11938.74 frames. ], batch size: 98, lr: 9.07e-03 +2024-08-06 05:34:29,769 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:35:39,198 INFO [trainer.py:765] (7/8) Epoch 14, batch 100, train_loss[loss=3.021, ArTop10Accuracy=0.7104, over 14762.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7414, over 4786.81 frames. ], batch size: 61, lr: 8.71e-03 +2024-08-06 05:36:23,063 INFO [trainer.py:765] (7/8) Epoch 14, batch 200, train_loss[loss=2.909, ArTop10Accuracy=0.7284, over 13763.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7409, over 7808.29 frames. 
], batch size: 34, lr: 8.68e-03 +2024-08-06 05:37:09,309 INFO [trainer.py:765] (7/8) Epoch 14, batch 300, train_loss[loss=2.872, ArTop10Accuracy=0.7372, over 14461.00 frames. ], tot_loss[loss=2.861, ArTop10Accuracy=0.7405, over 9434.89 frames. ], batch size: 44, lr: 8.66e-03 +2024-08-06 05:37:46,030 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.097e+02 1.304e+02 1.410e+02 1.531e+02 2.912e+02, threshold=2.820e+02, percent-clipped=0.2 +2024-08-06 05:37:55,139 INFO [trainer.py:765] (7/8) Epoch 14, batch 400, train_loss[loss=2.817, ArTop10Accuracy=0.744, over 10288.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7402, over 10351.66 frames. ], batch size: 14, lr: 8.64e-03 +2024-08-06 05:38:42,025 INFO [trainer.py:765] (7/8) Epoch 14, batch 500, train_loss[loss=2.833, ArTop10Accuracy=0.7431, over 12302.00 frames. ], tot_loss[loss=2.857, ArTop10Accuracy=0.7405, over 10918.81 frames. ], batch size: 22, lr: 8.61e-03 +2024-08-06 05:39:22,374 INFO [trainer.py:765] (7/8) Epoch 14, batch 600, train_loss[loss=2.904, ArTop10Accuracy=0.7352, over 11537.00 frames. ], tot_loss[loss=2.86, ArTop10Accuracy=0.7397, over 11438.29 frames. ], batch size: 18, lr: 8.59e-03 +2024-08-06 05:40:15,143 INFO [trainer.py:765] (7/8) Epoch 14, batch 700, train_loss[loss=2.907, ArTop10Accuracy=0.7416, over 10214.00 frames. ], tot_loss[loss=2.87, ArTop10Accuracy=0.738, over 11584.81 frames. ], batch size: 12, lr: 8.57e-03 +2024-08-06 05:40:49,135 INFO [trainer.py:765] (7/8) Epoch 14, batch 800, train_loss[loss=2.774, ArTop10Accuracy=0.7546, over 10079.00 frames. ], tot_loss[loss=2.872, ArTop10Accuracy=0.738, over 11701.95 frames. ], batch size: 12, lr: 8.55e-03 +2024-08-06 05:41:20,466 INFO [trainer.py:765] (7/8) Epoch 14, batch 900, train_loss[loss=2.781, ArTop10Accuracy=0.7551, over 12897.00 frames. ], tot_loss[loss=2.868, ArTop10Accuracy=0.7382, over 11750.67 frames. ], batch size: 27, lr: 8.52e-03 +2024-08-06 05:41:51,996 INFO [trainer.py:765] (7/8) Epoch 14, batch 1000, train_loss[loss=2.865, ArTop10Accuracy=0.7385, over 13192.00 frames. ], tot_loss[loss=2.873, ArTop10Accuracy=0.7378, over 11959.68 frames. ], batch size: 27, lr: 8.50e-03 +2024-08-06 05:42:23,216 INFO [trainer.py:765] (7/8) Epoch 14, batch 1100, train_loss[loss=2.838, ArTop10Accuracy=0.747, over 13818.00 frames. ], tot_loss[loss=2.88, ArTop10Accuracy=0.7362, over 12008.10 frames. ], batch size: 34, lr: 8.48e-03 +2024-08-06 05:42:53,549 INFO [trainer.py:765] (7/8) Epoch 14, batch 1200, train_loss[loss=3.06, ArTop10Accuracy=0.7085, over 11500.00 frames. ], tot_loss[loss=2.881, ArTop10Accuracy=0.7361, over 11952.39 frames. ], batch size: 99, lr: 8.46e-03 +2024-08-06 05:43:19,099 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:44:28,571 INFO [trainer.py:765] (7/8) Epoch 15, batch 100, train_loss[loss=2.855, ArTop10Accuracy=0.7372, over 14711.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7443, over 4779.25 frames. ], batch size: 62, lr: 8.14e-03 +2024-08-06 05:44:29,213 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 05:44:38,024 INFO [trainer.py:811] (7/8) Epoch 15, validation: loss=2.913, ArTop10Accuracy=0.7339, over 1829298.00 frames. 
+2024-08-06 05:44:38,024 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33330MB +2024-08-06 05:44:38,413 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.100e+02 1.307e+02 1.417e+02 1.528e+02 2.981e+02, threshold=2.833e+02, percent-clipped=0.1 +2024-08-06 05:45:20,184 INFO [trainer.py:765] (7/8) Epoch 15, batch 200, train_loss[loss=2.772, ArTop10Accuracy=0.755, over 13671.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7434, over 7798.34 frames. ], batch size: 34, lr: 8.11e-03 +2024-08-06 05:46:04,647 INFO [trainer.py:765] (7/8) Epoch 15, batch 300, train_loss[loss=2.874, ArTop10Accuracy=0.7398, over 14197.00 frames. ], tot_loss[loss=2.847, ArTop10Accuracy=0.7434, over 9419.25 frames. ], batch size: 44, lr: 8.09e-03 +2024-08-06 05:46:51,902 INFO [trainer.py:765] (7/8) Epoch 15, batch 400, train_loss[loss=2.894, ArTop10Accuracy=0.7382, over 11023.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7431, over 10331.10 frames. ], batch size: 15, lr: 8.07e-03 +2024-08-06 05:47:36,911 INFO [trainer.py:765] (7/8) Epoch 15, batch 500, train_loss[loss=2.803, ArTop10Accuracy=0.7451, over 11984.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7431, over 10898.05 frames. ], batch size: 22, lr: 8.05e-03 +2024-08-06 05:48:24,723 INFO [trainer.py:765] (7/8) Epoch 15, batch 600, train_loss[loss=2.866, ArTop10Accuracy=0.7361, over 11603.00 frames. ], tot_loss[loss=2.848, ArTop10Accuracy=0.7421, over 11431.42 frames. ], batch size: 18, lr: 8.03e-03 +2024-08-06 05:49:11,855 INFO [trainer.py:765] (7/8) Epoch 15, batch 700, train_loss[loss=2.791, ArTop10Accuracy=0.7493, over 10176.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7411, over 11571.28 frames. ], batch size: 12, lr: 8.01e-03 +2024-08-06 05:49:45,778 INFO [trainer.py:765] (7/8) Epoch 15, batch 800, train_loss[loss=2.948, ArTop10Accuracy=0.7084, over 9865.00 frames. ], tot_loss[loss=2.859, ArTop10Accuracy=0.7398, over 11676.56 frames. ], batch size: 12, lr: 7.99e-03 +2024-08-06 05:50:17,210 INFO [trainer.py:765] (7/8) Epoch 15, batch 900, train_loss[loss=2.927, ArTop10Accuracy=0.7297, over 13065.00 frames. ], tot_loss[loss=2.856, ArTop10Accuracy=0.7408, over 11719.16 frames. ], batch size: 27, lr: 7.97e-03 +2024-08-06 05:50:48,829 INFO [trainer.py:765] (7/8) Epoch 15, batch 1000, train_loss[loss=2.937, ArTop10Accuracy=0.7292, over 12817.00 frames. ], tot_loss[loss=2.864, ArTop10Accuracy=0.7392, over 11931.16 frames. ], batch size: 27, lr: 7.95e-03 +2024-08-06 05:51:20,069 INFO [trainer.py:765] (7/8) Epoch 15, batch 1100, train_loss[loss=2.935, ArTop10Accuracy=0.7279, over 13793.00 frames. ], tot_loss[loss=2.875, ArTop10Accuracy=0.7372, over 11987.27 frames. ], batch size: 34, lr: 7.93e-03 +2024-08-06 05:51:23,515 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 1.337e+02 1.431e+02 1.541e+02 2.784e+02, threshold=2.862e+02, percent-clipped=0.0 +2024-08-06 05:51:53,082 INFO [trainer.py:765] (7/8) Epoch 15, batch 1200, train_loss[loss=3.025, ArTop10Accuracy=0.7066, over 12796.00 frames. ], tot_loss[loss=2.878, ArTop10Accuracy=0.7364, over 11924.70 frames. ], batch size: 98, lr: 7.91e-03 +2024-08-06 05:52:18,170 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 05:53:29,263 INFO [trainer.py:765] (7/8) Epoch 16, batch 100, train_loss[loss=2.91, ArTop10Accuracy=0.7335, over 14452.00 frames. ], tot_loss[loss=2.841, ArTop10Accuracy=0.7447, over 4792.27 frames. 
], batch size: 61, lr: 7.63e-03 +2024-08-06 05:54:12,877 INFO [trainer.py:765] (7/8) Epoch 16, batch 200, train_loss[loss=2.824, ArTop10Accuracy=0.7481, over 13704.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.745, over 7808.32 frames. ], batch size: 34, lr: 7.61e-03 +2024-08-06 05:54:59,737 INFO [trainer.py:765] (7/8) Epoch 16, batch 300, train_loss[loss=2.967, ArTop10Accuracy=0.7217, over 14224.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7446, over 9417.88 frames. ], batch size: 44, lr: 7.59e-03 +2024-08-06 05:55:41,931 INFO [trainer.py:765] (7/8) Epoch 16, batch 400, train_loss[loss=2.81, ArTop10Accuracy=0.7495, over 10345.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7451, over 10344.76 frames. ], batch size: 14, lr: 7.58e-03 +2024-08-06 05:56:27,680 INFO [trainer.py:765] (7/8) Epoch 16, batch 500, train_loss[loss=2.813, ArTop10Accuracy=0.7427, over 12488.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7445, over 10892.21 frames. ], batch size: 22, lr: 7.56e-03 +2024-08-06 05:57:12,439 INFO [trainer.py:765] (7/8) Epoch 16, batch 600, train_loss[loss=2.741, ArTop10Accuracy=0.7625, over 11649.00 frames. ], tot_loss[loss=2.836, ArTop10Accuracy=0.7445, over 11409.96 frames. ], batch size: 18, lr: 7.54e-03 +2024-08-06 05:58:00,040 INFO [trainer.py:765] (7/8) Epoch 16, batch 700, train_loss[loss=2.781, ArTop10Accuracy=0.7502, over 9260.00 frames. ], tot_loss[loss=2.843, ArTop10Accuracy=0.7433, over 11558.03 frames. ], batch size: 11, lr: 7.52e-03 +2024-08-06 05:58:34,024 INFO [trainer.py:765] (7/8) Epoch 16, batch 800, train_loss[loss=2.638, ArTop10Accuracy=0.7731, over 10124.00 frames. ], tot_loss[loss=2.849, ArTop10Accuracy=0.7422, over 11686.87 frames. ], batch size: 12, lr: 7.50e-03 +2024-08-06 05:58:41,569 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 05:58:50,426 INFO [trainer.py:811] (7/8) Epoch 16, validation: loss=2.915, ArTop10Accuracy=0.7338, over 1829298.00 frames. +2024-08-06 05:58:50,427 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33330MB +2024-08-06 05:58:50,730 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.121e+02 1.335e+02 1.445e+02 1.570e+02 3.252e+02, threshold=2.890e+02, percent-clipped=0.1 +2024-08-06 05:59:14,321 INFO [trainer.py:765] (7/8) Epoch 16, batch 900, train_loss[loss=2.922, ArTop10Accuracy=0.7276, over 13025.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7438, over 11734.24 frames. ], batch size: 27, lr: 7.49e-03 +2024-08-06 05:59:45,915 INFO [trainer.py:765] (7/8) Epoch 16, batch 1000, train_loss[loss=2.942, ArTop10Accuracy=0.728, over 12972.00 frames. ], tot_loss[loss=2.846, ArTop10Accuracy=0.7424, over 11936.83 frames. ], batch size: 27, lr: 7.47e-03 +2024-08-06 06:00:17,092 INFO [trainer.py:765] (7/8) Epoch 16, batch 1100, train_loss[loss=2.712, ArTop10Accuracy=0.7669, over 13554.00 frames. ], tot_loss[loss=2.854, ArTop10Accuracy=0.7412, over 11992.15 frames. ], batch size: 34, lr: 7.45e-03 +2024-08-06 06:00:47,465 INFO [trainer.py:765] (7/8) Epoch 16, batch 1200, train_loss[loss=3.043, ArTop10Accuracy=0.707, over 11286.00 frames. ], tot_loss[loss=2.853, ArTop10Accuracy=0.7415, over 11929.58 frames. ], batch size: 97, lr: 7.43e-03 +2024-08-06 06:01:12,505 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:02:27,261 INFO [trainer.py:765] (7/8) Epoch 17, batch 100, train_loss[loss=2.849, ArTop10Accuracy=0.7423, over 14226.00 frames. ], tot_loss[loss=2.835, ArTop10Accuracy=0.746, over 4787.10 frames. 
], batch size: 61, lr: 7.18e-03 +2024-08-06 06:03:11,850 INFO [trainer.py:765] (7/8) Epoch 17, batch 200, train_loss[loss=2.721, ArTop10Accuracy=0.7681, over 13637.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.748, over 7790.35 frames. ], batch size: 34, lr: 7.17e-03 +2024-08-06 06:03:57,502 INFO [trainer.py:765] (7/8) Epoch 17, batch 300, train_loss[loss=2.877, ArTop10Accuracy=0.7331, over 14629.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7482, over 9438.25 frames. ], batch size: 44, lr: 7.15e-03 +2024-08-06 06:04:42,837 INFO [trainer.py:765] (7/8) Epoch 17, batch 400, train_loss[loss=2.743, ArTop10Accuracy=0.7543, over 10921.00 frames. ], tot_loss[loss=2.821, ArTop10Accuracy=0.7481, over 10342.37 frames. ], batch size: 15, lr: 7.13e-03 +2024-08-06 06:05:29,004 INFO [trainer.py:765] (7/8) Epoch 17, batch 500, train_loss[loss=2.778, ArTop10Accuracy=0.7533, over 12312.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.749, over 10913.72 frames. ], batch size: 22, lr: 7.12e-03 +2024-08-06 06:05:49,551 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.142e+02 1.359e+02 1.445e+02 1.551e+02 2.741e+02, threshold=2.891e+02, percent-clipped=0.0 +2024-08-06 06:06:20,723 INFO [trainer.py:765] (7/8) Epoch 17, batch 600, train_loss[loss=2.677, ArTop10Accuracy=0.7716, over 11626.00 frames. ], tot_loss[loss=2.824, ArTop10Accuracy=0.7473, over 11448.56 frames. ], batch size: 18, lr: 7.10e-03 +2024-08-06 06:07:04,694 INFO [trainer.py:765] (7/8) Epoch 17, batch 700, train_loss[loss=2.844, ArTop10Accuracy=0.7481, over 10138.00 frames. ], tot_loss[loss=2.833, ArTop10Accuracy=0.7456, over 11579.32 frames. ], batch size: 12, lr: 7.09e-03 +2024-08-06 06:07:44,896 INFO [trainer.py:765] (7/8) Epoch 17, batch 800, train_loss[loss=2.5, ArTop10Accuracy=0.7955, over 10062.00 frames. ], tot_loss[loss=2.837, ArTop10Accuracy=0.7444, over 11683.64 frames. ], batch size: 12, lr: 7.07e-03 +2024-08-06 06:08:16,384 INFO [trainer.py:765] (7/8) Epoch 17, batch 900, train_loss[loss=2.784, ArTop10Accuracy=0.7562, over 12850.00 frames. ], tot_loss[loss=2.828, ArTop10Accuracy=0.7462, over 11737.83 frames. ], batch size: 27, lr: 7.05e-03 +2024-08-06 06:08:47,994 INFO [trainer.py:765] (7/8) Epoch 17, batch 1000, train_loss[loss=2.828, ArTop10Accuracy=0.7479, over 13056.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7461, over 11930.06 frames. ], batch size: 27, lr: 7.04e-03 +2024-08-06 06:09:19,134 INFO [trainer.py:765] (7/8) Epoch 17, batch 1100, train_loss[loss=2.878, ArTop10Accuracy=0.7387, over 13781.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.743, over 12004.86 frames. ], batch size: 34, lr: 7.02e-03 +2024-08-06 06:09:49,444 INFO [trainer.py:765] (7/8) Epoch 17, batch 1200, train_loss[loss=3.044, ArTop10Accuracy=0.707, over 11693.00 frames. ], tot_loss[loss=2.845, ArTop10Accuracy=0.7427, over 11935.28 frames. ], batch size: 99, lr: 7.01e-03 +2024-08-06 06:10:14,194 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:11:23,103 INFO [trainer.py:765] (7/8) Epoch 18, batch 100, train_loss[loss=2.881, ArTop10Accuracy=0.7371, over 14889.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7493, over 4799.69 frames. ], batch size: 61, lr: 6.78e-03 +2024-08-06 06:12:16,260 INFO [trainer.py:765] (7/8) Epoch 18, batch 200, train_loss[loss=2.763, ArTop10Accuracy=0.7598, over 13617.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7497, over 7821.13 frames. 
], batch size: 34, lr: 6.77e-03 +2024-08-06 06:12:40,318 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 06:12:48,991 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=2.916, ArTop10Accuracy=0.7343, over 1829298.00 frames. +2024-08-06 06:12:48,992 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33330MB +2024-08-06 06:12:49,335 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.163e+02 1.377e+02 1.476e+02 1.588e+02 2.450e+02, threshold=2.952e+02, percent-clipped=0.0 +2024-08-06 06:13:07,116 INFO [trainer.py:765] (7/8) Epoch 18, batch 300, train_loss[loss=2.901, ArTop10Accuracy=0.7325, over 14308.00 frames. ], tot_loss[loss=2.815, ArTop10Accuracy=0.7496, over 9430.32 frames. ], batch size: 44, lr: 6.75e-03 +2024-08-06 06:13:54,097 INFO [trainer.py:765] (7/8) Epoch 18, batch 400, train_loss[loss=2.773, ArTop10Accuracy=0.7506, over 10390.00 frames. ], tot_loss[loss=2.809, ArTop10Accuracy=0.7505, over 10345.82 frames. ], batch size: 14, lr: 6.74e-03 +2024-08-06 06:14:38,488 INFO [trainer.py:765] (7/8) Epoch 18, batch 500, train_loss[loss=2.731, ArTop10Accuracy=0.7656, over 12024.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7503, over 10901.56 frames. ], batch size: 22, lr: 6.73e-03 +2024-08-06 06:15:23,628 INFO [trainer.py:765] (7/8) Epoch 18, batch 600, train_loss[loss=2.73, ArTop10Accuracy=0.7583, over 11567.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7492, over 11428.91 frames. ], batch size: 18, lr: 6.71e-03 +2024-08-06 06:16:17,342 INFO [trainer.py:765] (7/8) Epoch 18, batch 700, train_loss[loss=2.82, ArTop10Accuracy=0.7657, over 10137.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7481, over 11563.74 frames. ], batch size: 12, lr: 6.70e-03 +2024-08-06 06:16:51,428 INFO [trainer.py:765] (7/8) Epoch 18, batch 800, train_loss[loss=2.826, ArTop10Accuracy=0.7416, over 10036.00 frames. ], tot_loss[loss=2.825, ArTop10Accuracy=0.7467, over 11673.34 frames. ], batch size: 12, lr: 6.68e-03 +2024-08-06 06:17:22,913 INFO [trainer.py:765] (7/8) Epoch 18, batch 900, train_loss[loss=2.732, ArTop10Accuracy=0.7624, over 12841.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.748, over 11713.59 frames. ], batch size: 27, lr: 6.67e-03 +2024-08-06 06:17:54,528 INFO [trainer.py:765] (7/8) Epoch 18, batch 1000, train_loss[loss=2.8, ArTop10Accuracy=0.7475, over 13072.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7464, over 11924.02 frames. ], batch size: 27, lr: 6.65e-03 +2024-08-06 06:18:25,663 INFO [trainer.py:765] (7/8) Epoch 18, batch 1100, train_loss[loss=2.814, ArTop10Accuracy=0.7458, over 13834.00 frames. ], tot_loss[loss=2.838, ArTop10Accuracy=0.7444, over 11968.67 frames. ], batch size: 34, lr: 6.64e-03 +2024-08-06 06:18:55,971 INFO [trainer.py:765] (7/8) Epoch 18, batch 1200, train_loss[loss=2.987, ArTop10Accuracy=0.7172, over 12370.00 frames. ], tot_loss[loss=2.84, ArTop10Accuracy=0.7439, over 11941.77 frames. ], batch size: 98, lr: 6.63e-03 +2024-08-06 06:19:19,163 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.178e+02 1.387e+02 1.492e+02 1.607e+02 2.982e+02, threshold=2.983e+02, percent-clipped=0.1 +2024-08-06 06:19:23,796 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:20:29,729 INFO [trainer.py:765] (7/8) Epoch 19, batch 100, train_loss[loss=2.899, ArTop10Accuracy=0.7365, over 14522.00 frames. ], tot_loss[loss=2.813, ArTop10Accuracy=0.7499, over 4776.03 frames. 
], batch size: 61, lr: 6.43e-03 +2024-08-06 06:21:11,275 INFO [trainer.py:765] (7/8) Epoch 19, batch 200, train_loss[loss=2.826, ArTop10Accuracy=0.7503, over 14068.00 frames. ], tot_loss[loss=2.8, ArTop10Accuracy=0.7525, over 7785.96 frames. ], batch size: 35, lr: 6.41e-03 +2024-08-06 06:21:56,079 INFO [trainer.py:765] (7/8) Epoch 19, batch 300, train_loss[loss=2.851, ArTop10Accuracy=0.7433, over 14342.00 frames. ], tot_loss[loss=2.798, ArTop10Accuracy=0.753, over 9419.81 frames. ], batch size: 44, lr: 6.40e-03 +2024-08-06 06:22:36,013 INFO [trainer.py:765] (7/8) Epoch 19, batch 400, train_loss[loss=2.761, ArTop10Accuracy=0.7579, over 10993.00 frames. ], tot_loss[loss=2.797, ArTop10Accuracy=0.7528, over 10328.28 frames. ], batch size: 15, lr: 6.39e-03 +2024-08-06 06:23:18,998 INFO [trainer.py:765] (7/8) Epoch 19, batch 500, train_loss[loss=2.733, ArTop10Accuracy=0.7586, over 12113.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7532, over 10891.54 frames. ], batch size: 22, lr: 6.37e-03 +2024-08-06 06:24:03,685 INFO [trainer.py:765] (7/8) Epoch 19, batch 600, train_loss[loss=2.733, ArTop10Accuracy=0.7635, over 11651.00 frames. ], tot_loss[loss=2.802, ArTop10Accuracy=0.7516, over 11428.00 frames. ], batch size: 18, lr: 6.36e-03 +2024-08-06 06:24:46,186 INFO [trainer.py:765] (7/8) Epoch 19, batch 700, train_loss[loss=2.761, ArTop10Accuracy=0.7629, over 10146.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7508, over 11564.64 frames. ], batch size: 12, lr: 6.35e-03 +2024-08-06 06:25:22,355 INFO [trainer.py:765] (7/8) Epoch 19, batch 800, train_loss[loss=2.826, ArTop10Accuracy=0.7441, over 10221.00 frames. ], tot_loss[loss=2.816, ArTop10Accuracy=0.7485, over 11678.87 frames. ], batch size: 12, lr: 6.33e-03 +2024-08-06 06:25:53,625 INFO [trainer.py:765] (7/8) Epoch 19, batch 900, train_loss[loss=2.86, ArTop10Accuracy=0.7408, over 13085.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7493, over 11728.87 frames. ], batch size: 27, lr: 6.32e-03 +2024-08-06 06:26:21,773 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 06:26:30,765 INFO [trainer.py:811] (7/8) Epoch 19, validation: loss=2.918, ArTop10Accuracy=0.733, over 1829298.00 frames. +2024-08-06 06:26:30,766 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 33330MB +2024-08-06 06:26:31,053 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.198e+02 1.416e+02 1.525e+02 1.662e+02 2.849e+02, threshold=3.050e+02, percent-clipped=0.0 +2024-08-06 06:26:34,030 INFO [trainer.py:765] (7/8) Epoch 19, batch 1000, train_loss[loss=2.896, ArTop10Accuracy=0.733, over 13050.00 frames. ], tot_loss[loss=2.819, ArTop10Accuracy=0.7481, over 11956.93 frames. ], batch size: 27, lr: 6.31e-03 +2024-08-06 06:27:05,190 INFO [trainer.py:765] (7/8) Epoch 19, batch 1100, train_loss[loss=2.718, ArTop10Accuracy=0.7668, over 13817.00 frames. ], tot_loss[loss=2.827, ArTop10Accuracy=0.7463, over 12009.26 frames. ], batch size: 34, lr: 6.30e-03 +2024-08-06 06:27:35,453 INFO [trainer.py:765] (7/8) Epoch 19, batch 1200, train_loss[loss=2.966, ArTop10Accuracy=0.7175, over 11568.00 frames. ], tot_loss[loss=2.829, ArTop10Accuracy=0.7461, over 11933.03 frames. ], batch size: 99, lr: 6.28e-03 +2024-08-06 06:28:00,587 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:29:08,985 INFO [trainer.py:765] (7/8) Epoch 20, batch 100, train_loss[loss=2.844, ArTop10Accuracy=0.7501, over 14479.00 frames. ], tot_loss[loss=2.801, ArTop10Accuracy=0.7528, over 4787.27 frames. 
], batch size: 61, lr: 6.10e-03 +2024-08-06 06:29:50,318 INFO [trainer.py:765] (7/8) Epoch 20, batch 200, train_loss[loss=2.834, ArTop10Accuracy=0.7514, over 13578.00 frames. ], tot_loss[loss=2.788, ArTop10Accuracy=0.7551, over 7764.62 frames. ], batch size: 34, lr: 6.09e-03 +2024-08-06 06:30:37,106 INFO [trainer.py:765] (7/8) Epoch 20, batch 300, train_loss[loss=2.862, ArTop10Accuracy=0.7425, over 14228.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.754, over 9402.11 frames. ], batch size: 44, lr: 6.08e-03 +2024-08-06 06:31:16,353 INFO [trainer.py:765] (7/8) Epoch 20, batch 400, train_loss[loss=2.766, ArTop10Accuracy=0.7557, over 11066.00 frames. ], tot_loss[loss=2.792, ArTop10Accuracy=0.7543, over 10323.43 frames. ], batch size: 15, lr: 6.07e-03 +2024-08-06 06:32:03,759 INFO [trainer.py:765] (7/8) Epoch 20, batch 500, train_loss[loss=2.76, ArTop10Accuracy=0.7643, over 12218.00 frames. ], tot_loss[loss=2.789, ArTop10Accuracy=0.7548, over 10904.40 frames. ], batch size: 22, lr: 6.05e-03 +2024-08-06 06:32:43,357 INFO [trainer.py:765] (7/8) Epoch 20, batch 600, train_loss[loss=2.783, ArTop10Accuracy=0.7616, over 11421.00 frames. ], tot_loss[loss=2.795, ArTop10Accuracy=0.7531, over 11422.83 frames. ], batch size: 18, lr: 6.04e-03 +2024-08-06 06:33:36,752 INFO [trainer.py:765] (7/8) Epoch 20, batch 700, train_loss[loss=2.64, ArTop10Accuracy=0.7865, over 9276.00 frames. ], tot_loss[loss=2.805, ArTop10Accuracy=0.7511, over 11564.03 frames. ], batch size: 11, lr: 6.03e-03 +2024-08-06 06:33:43,829 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.196e+02 1.417e+02 1.526e+02 1.639e+02 3.791e+02, threshold=3.052e+02, percent-clipped=0.1 +2024-08-06 06:34:13,304 INFO [trainer.py:765] (7/8) Epoch 20, batch 800, train_loss[loss=2.722, ArTop10Accuracy=0.7663, over 10072.00 frames. ], tot_loss[loss=2.81, ArTop10Accuracy=0.7496, over 11684.37 frames. ], batch size: 12, lr: 6.02e-03 +2024-08-06 06:34:44,580 INFO [trainer.py:765] (7/8) Epoch 20, batch 900, train_loss[loss=2.875, ArTop10Accuracy=0.7347, over 12869.00 frames. ], tot_loss[loss=2.806, ArTop10Accuracy=0.7507, over 11714.01 frames. ], batch size: 27, lr: 6.01e-03 +2024-08-06 06:35:16,139 INFO [trainer.py:765] (7/8) Epoch 20, batch 1000, train_loss[loss=2.824, ArTop10Accuracy=0.7471, over 12908.00 frames. ], tot_loss[loss=2.812, ArTop10Accuracy=0.7495, over 11907.52 frames. ], batch size: 27, lr: 6.00e-03 +2024-08-06 06:35:47,214 INFO [trainer.py:765] (7/8) Epoch 20, batch 1100, train_loss[loss=2.749, ArTop10Accuracy=0.7582, over 13606.00 frames. ], tot_loss[loss=2.817, ArTop10Accuracy=0.7485, over 11969.14 frames. ], batch size: 34, lr: 5.99e-03 +2024-08-06 06:36:17,439 INFO [trainer.py:765] (7/8) Epoch 20, batch 1200, train_loss[loss=2.948, ArTop10Accuracy=0.7227, over 12958.00 frames. ], tot_loss[loss=2.818, ArTop10Accuracy=0.7484, over 11919.73 frames. ], batch size: 98, lr: 5.97e-03 +2024-08-06 06:36:42,404 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:36:42,406 INFO [trainer.py:1069] (7/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-38-43-0 b/libritts/log/log-train-2024-08-06-06-38-43-0 new file mode 100644 index 0000000000000000000000000000000000000000..fa49934a6424998159f1658bc9f3df72372d5087 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-0 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,496 INFO [trainer.py:870] (0/8) Training started +2024-08-06 06:38:43,501 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 06:38:43,501 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,501 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 06:38:44,280 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-1 b/libritts/log/log-train-2024-08-06-06-38-43-1 new file mode 100644 index 0000000000000000000000000000000000000000..257a1782af538e72fd6937f23f36ed68ae987962 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-1 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,495 INFO [trainer.py:870] (1/8) Training started +2024-08-06 06:38:43,496 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 06:38:43,496 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 
'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,496 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 06:38:44,234 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-2 b/libritts/log/log-train-2024-08-06-06-38-43-2 new file mode 100644 index 0000000000000000000000000000000000000000..9d6d07b228315be56b5729d1a95aa2994876e550 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-2 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,496 INFO [trainer.py:870] (2/8) Training started +2024-08-06 06:38:43,497 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 06:38:43,497 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 
'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,497 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 06:38:44,235 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-3 b/libritts/log/log-train-2024-08-06-06-38-43-3 new file mode 100644 index 0000000000000000000000000000000000000000..20c9a290e4e815be0bc1637aa006a906d724bccd --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-3 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,496 INFO [trainer.py:870] (3/8) Training started +2024-08-06 06:38:43,497 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 06:38:43,497 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': 
PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,497 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 06:38:44,282 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-4 b/libritts/log/log-train-2024-08-06-06-38-43-4 new file mode 100644 index 0000000000000000000000000000000000000000..d42d8da1de37f69af5c33be63700849afd495582 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-4 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,495 INFO [trainer.py:870] (4/8) Training started +2024-08-06 06:38:43,496 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 06:38:43,496 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,496 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 06:38:44,266 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 diff --git 
a/libritts/log/log-train-2024-08-06-06-38-43-5 b/libritts/log/log-train-2024-08-06-06-38-43-5 new file mode 100644 index 0000000000000000000000000000000000000000..8b9061dc3134cdf92013056e3ad9b65ac92ff112 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-5 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,494 INFO [trainer.py:870] (5/8) Training started +2024-08-06 06:38:43,495 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 06:38:43,495 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,496 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 06:38:44,263 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-6 b/libritts/log/log-train-2024-08-06-06-38-43-6 new file mode 100644 index 0000000000000000000000000000000000000000..126ce73be000a1be52d7d49fd0d3aacdaeb786fa --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-6 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,497 INFO [trainer.py:870] (6/8) Training started +2024-08-06 06:38:43,498 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 06:38:43,498 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 
'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,498 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 06:38:44,273 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-38-43-7 b/libritts/log/log-train-2024-08-06-06-38-43-7 new file mode 100644 index 0000000000000000000000000000000000000000..ec44e8e4b65aa36ea8fe0a03cc4f18887b864a16 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-38-43-7 @@ -0,0 +1,5 @@ +2024-08-06 06:38:43,497 INFO [trainer.py:870] (7/8) Training started +2024-08-06 06:38:43,498 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 06:38:43,498 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 
'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:38:43,498 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 06:38:44,276 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 diff --git a/libritts/log/log-train-2024-08-06-06-41-41-0 b/libritts/log/log-train-2024-08-06-06-41-41-0 new file mode 100644 index 0000000000000000000000000000000000000000..8ce93e50200412bcd2376e183abb4752a8c1f7e9 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-0 @@ -0,0 +1,1404 @@ +2024-08-06 06:41:41,476 INFO [trainer.py:870] (0/8) Training started +2024-08-06 06:41:41,481 INFO [trainer.py:889] (0/8) Device: cuda:0 +2024-08-06 06:41:41,481 INFO [trainer.py:890] (0/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': 
PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,481 INFO [trainer.py:892] (0/8) About to create model +2024-08-06 06:41:42,388 INFO [trainer.py:899] (0/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,388 INFO [checkpoint.py:112] (0/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,859 INFO [trainer.py:914] (0/8) Using DDP +2024-08-06 06:41:46,902 INFO [datamodule.py:427] (0/8) About to get train cuts +2024-08-06 06:41:46,905 INFO [datamodule.py:434] (0/8) About to get dev cuts +2024-08-06 06:41:46,906 INFO [datamodule.py:292] (0/8) Disable SpecAugment +2024-08-06 06:41:46,906 INFO [datamodule.py:294] (0/8) About to create train dataset +2024-08-06 06:41:46,906 INFO [datamodule.py:323] (0/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,527 INFO [datamodule.py:344] (0/8) About to create train dataloader +2024-08-06 06:41:47,527 INFO [datamodule.py:367] (0/8) About to create dev dataset +2024-08-06 06:41:47,861 INFO [datamodule.py:388] (0/8) About to create dev dataloader +2024-08-06 06:42:36,135 INFO [trainer.py:765] (0/8) Epoch 1, batch 100, train_loss[loss=93.96, NarTop10Accuracy=0.02016, over 7247.00 frames. ], tot_loss[loss=80.09, NarTop10Accuracy=0.05241, over 2370.46 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,818 INFO [trainer.py:765] (0/8) Epoch 1, batch 200, train_loss[loss=126.1, NarTop10Accuracy=0.02241, over 6956.00 frames. ], tot_loss[loss=98.98, NarTop10Accuracy=0.04469, over 3855.17 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,848 INFO [trainer.py:765] (0/8) Epoch 1, batch 300, train_loss[loss=74.09, NarTop10Accuracy=0.02428, over 7160.00 frames. ], tot_loss[loss=86.75, NarTop10Accuracy=0.04646, over 4649.32 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,251 INFO [trainer.py:765] (0/8) Epoch 1, batch 400, train_loss[loss=32.01, NarTop10Accuracy=0.05699, over 5162.00 frames. ], tot_loss[loss=67.92, NarTop10Accuracy=0.05099, over 5120.94 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,445 INFO [trainer.py:765] (0/8) Epoch 1, batch 500, train_loss[loss=16.29, NarTop10Accuracy=0.02792, over 6165.00 frames. ], tot_loss[loss=48.69, NarTop10Accuracy=0.05609, over 5399.63 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,924 INFO [trainer.py:765] (0/8) Epoch 1, batch 600, train_loss[loss=5.921, NarTop10Accuracy=0.1809, over 5785.00 frames. ], tot_loss[loss=33.34, NarTop10Accuracy=0.06239, over 5667.07 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,481 INFO [trainer.py:765] (0/8) Epoch 1, batch 700, train_loss[loss=7.066, NarTop10Accuracy=0.1038, over 5001.00 frames. ], tot_loss[loss=23.5, NarTop10Accuracy=0.07028, over 5746.27 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,663 INFO [trainer.py:765] (0/8) Epoch 1, batch 800, train_loss[loss=6.654, NarTop10Accuracy=0.09871, over 4448.00 frames. ], tot_loss[loss=17.52, NarTop10Accuracy=0.08054, over 5789.51 frames. 
], batch size: 5, lr: 2.98e-02 +2024-08-06 06:46:37,733 INFO [trainer.py:765] (0/8) Epoch 1, batch 900, train_loss[loss=6.064, NarTop10Accuracy=0.1631, over 6318.00 frames. ], tot_loss[loss=13.02, NarTop10Accuracy=0.1115, over 5813.80 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-1000.pt +2024-08-06 06:47:17,131 INFO [trainer.py:765] (0/8) Epoch 1, batch 1000, train_loss[loss=5.936, NarTop10Accuracy=0.1929, over 6259.00 frames. ], tot_loss[loss=10.17, NarTop10Accuracy=0.1374, over 5906.22 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,141 INFO [trainer.py:765] (0/8) Epoch 1, batch 1100, train_loss[loss=5.61, NarTop10Accuracy=0.1818, over 6846.00 frames. ], tot_loss[loss=8.416, NarTop10Accuracy=0.1573, over 5941.35 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (0/8) Epoch 1, batch 1200, train_loss[loss=6.284, NarTop10Accuracy=0.142, over 7266.00 frames. ], tot_loss[loss=7.309, NarTop10Accuracy=0.174, over 5955.98 frames. ], batch size: 30, lr: 2.96e-02 +2024-08-06 06:48:47,235 INFO [trainer.py:765] (0/8) Epoch 1, batch 1300, train_loss[loss=5.459, NarTop10Accuracy=0.2113, over 4197.00 frames. ], tot_loss[loss=6.614, NarTop10Accuracy=0.1851, over 6014.77 frames. ], batch size: 5, lr: 2.95e-02 +2024-08-06 06:49:23,567 INFO [trainer.py:765] (0/8) Epoch 1, batch 1400, train_loss[loss=5.373, NarTop10Accuracy=0.2092, over 6088.00 frames. ], tot_loss[loss=6.191, NarTop10Accuracy=0.1918, over 6033.51 frames. ], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,506 INFO [trainer.py:765] (0/8) Epoch 1, batch 1500, train_loss[loss=5.58, NarTop10Accuracy=0.1949, over 6435.00 frames. ], tot_loss[loss=5.925, NarTop10Accuracy=0.1986, over 5983.61 frames. ], batch size: 49, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (0/8) Epoch 1, batch 1600, train_loss[loss=5.545, NarTop10Accuracy=0.2081, over 7181.00 frames. ], tot_loss[loss=5.748, NarTop10Accuracy=0.2047, over 5961.91 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,597 INFO [trainer.py:765] (0/8) Epoch 1, batch 1700, train_loss[loss=5.247, NarTop10Accuracy=0.2516, over 6695.00 frames. ], tot_loss[loss=5.628, NarTop10Accuracy=0.2103, over 5947.33 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 06:51:11,954 INFO [trainer.py:765] (0/8) Epoch 1, batch 1800, train_loss[loss=5.415, NarTop10Accuracy=0.2226, over 7117.00 frames. ], tot_loss[loss=5.548, NarTop10Accuracy=0.2161, over 6025.23 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,222 INFO [trainer.py:765] (0/8) Epoch 1, batch 1900, train_loss[loss=5.482, NarTop10Accuracy=0.2136, over 6163.00 frames. ], tot_loss[loss=5.492, NarTop10Accuracy=0.2209, over 6050.10 frames. ], batch size: 51, lr: 2.90e-02 +2024-08-06 06:52:03,652 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-2000.pt +2024-08-06 06:52:07,190 INFO [trainer.py:765] (0/8) Epoch 1, batch 2000, train_loss[loss=5.5, NarTop10Accuracy=0.2182, over 6349.00 frames. ], tot_loss[loss=5.438, NarTop10Accuracy=0.2287, over 6013.65 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 06:52:07,192 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (0/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. 
+2024-08-06 06:52:13,994 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,585 INFO [trainer.py:765] (0/8) Epoch 1, batch 2100, train_loss[loss=5.466, NarTop10Accuracy=0.224, over 4745.00 frames. ], tot_loss[loss=5.393, NarTop10Accuracy=0.2367, over 6010.12 frames. ], batch size: 5, lr: 2.88e-02 +2024-08-06 06:53:05,354 INFO [trainer.py:765] (0/8) Epoch 1, batch 2200, train_loss[loss=5.164, NarTop10Accuracy=0.285, over 7317.00 frames. ], tot_loss[loss=5.36, NarTop10Accuracy=0.2409, over 6049.38 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 06:53:30,700 INFO [trainer.py:765] (0/8) Epoch 1, batch 2300, train_loss[loss=5.41, NarTop10Accuracy=0.2237, over 5848.00 frames. ], tot_loss[loss=5.345, NarTop10Accuracy=0.2433, over 6072.25 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,358 INFO [trainer.py:765] (0/8) Epoch 1, batch 2400, train_loss[loss=5.3, NarTop10Accuracy=0.2495, over 6106.00 frames. ], tot_loss[loss=5.317, NarTop10Accuracy=0.2493, over 5902.86 frames. ], batch size: 48, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (0/8) Epoch 1, batch 2500, train_loss[loss=5.107, NarTop10Accuracy=0.3016, over 5209.00 frames. ], tot_loss[loss=5.258, NarTop10Accuracy=0.2607, over 5555.34 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:39,603 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 06:54:39,606 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-1.pt +2024-08-06 06:55:37,936 INFO [trainer.py:765] (0/8) Epoch 2, batch 100, train_loss[loss=5.107, NarTop10Accuracy=0.2974, over 7423.00 frames. ], tot_loss[loss=5.185, NarTop10Accuracy=0.2793, over 2373.96 frames. ], batch size: 31, lr: 2.77e-02 +2024-08-06 06:56:16,405 INFO [trainer.py:765] (0/8) Epoch 2, batch 200, train_loss[loss=5.004, NarTop10Accuracy=0.3274, over 6486.00 frames. ], tot_loss[loss=5.164, NarTop10Accuracy=0.2823, over 3861.00 frames. ], batch size: 16, lr: 2.76e-02 +2024-08-06 06:56:44,972 INFO [trainer.py:765] (0/8) Epoch 2, batch 300, train_loss[loss=5.149, NarTop10Accuracy=0.2906, over 7045.00 frames. ], tot_loss[loss=5.157, NarTop10Accuracy=0.2838, over 4668.94 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,938 INFO [trainer.py:765] (0/8) Epoch 2, batch 400, train_loss[loss=5.292, NarTop10Accuracy=0.24, over 5163.00 frames. ], tot_loss[loss=5.148, NarTop10Accuracy=0.2853, over 5112.78 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:18,919 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-3000.pt +2024-08-06 06:57:56,208 INFO [trainer.py:765] (0/8) Epoch 2, batch 500, train_loss[loss=4.984, NarTop10Accuracy=0.3199, over 6097.00 frames. ], tot_loss[loss=5.111, NarTop10Accuracy=0.2922, over 5392.69 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,425 INFO [trainer.py:765] (0/8) Epoch 2, batch 600, train_loss[loss=4.903, NarTop10Accuracy=0.3415, over 5838.00 frames. ], tot_loss[loss=5.091, NarTop10Accuracy=0.2964, over 5666.11 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (0/8) Epoch 2, batch 700, train_loss[loss=4.706, NarTop10Accuracy=0.3613, over 4987.00 frames. ], tot_loss[loss=5.08, NarTop10Accuracy=0.2984, over 5734.87 frames. 
], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (0/8) Epoch 2, batch 800, train_loss[loss=5.114, NarTop10Accuracy=0.273, over 5006.00 frames. ], tot_loss[loss=5.082, NarTop10Accuracy=0.2977, over 5789.50 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,183 INFO [trainer.py:765] (0/8) Epoch 2, batch 900, train_loss[loss=5.388, NarTop10Accuracy=0.2271, over 6248.00 frames. ], tot_loss[loss=5.05, NarTop10Accuracy=0.3043, over 5820.64 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,141 INFO [trainer.py:765] (0/8) Epoch 2, batch 1000, train_loss[loss=4.861, NarTop10Accuracy=0.34, over 6331.00 frames. ], tot_loss[loss=5.019, NarTop10Accuracy=0.3106, over 5931.97 frames. ], batch size: 13, lr: 2.66e-02 +2024-08-06 07:01:05,572 INFO [trainer.py:765] (0/8) Epoch 2, batch 1100, train_loss[loss=4.974, NarTop10Accuracy=0.317, over 6833.00 frames. ], tot_loss[loss=5.001, NarTop10Accuracy=0.3133, over 5970.00 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (0/8) Epoch 2, batch 1200, train_loss[loss=4.804, NarTop10Accuracy=0.3518, over 7399.00 frames. ], tot_loss[loss=4.994, NarTop10Accuracy=0.3142, over 5966.24 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 07:02:15,644 INFO [trainer.py:765] (0/8) Epoch 2, batch 1300, train_loss[loss=5.246, NarTop10Accuracy=0.245, over 5093.00 frames. ], tot_loss[loss=4.956, NarTop10Accuracy=0.3217, over 6021.52 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,252 INFO [trainer.py:765] (0/8) Epoch 2, batch 1400, train_loss[loss=4.802, NarTop10Accuracy=0.3457, over 6083.00 frames. ], tot_loss[loss=4.941, NarTop10Accuracy=0.3247, over 6036.88 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,267 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-4000.pt +2024-08-06 07:02:54,010 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (0/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. +2024-08-06 07:03:02,095 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,472 INFO [trainer.py:765] (0/8) Epoch 2, batch 1500, train_loss[loss=5.067, NarTop10Accuracy=0.3091, over 6205.00 frames. ], tot_loss[loss=4.936, NarTop10Accuracy=0.3252, over 5965.12 frames. ], batch size: 49, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (0/8) Epoch 2, batch 1600, train_loss[loss=4.736, NarTop10Accuracy=0.3577, over 7148.00 frames. ], tot_loss[loss=4.919, NarTop10Accuracy=0.3286, over 5952.23 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (0/8) Epoch 2, batch 1700, train_loss[loss=4.841, NarTop10Accuracy=0.3504, over 6386.00 frames. ], tot_loss[loss=4.914, NarTop10Accuracy=0.3306, over 5937.54 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (0/8) Epoch 2, batch 1800, train_loss[loss=4.922, NarTop10Accuracy=0.3277, over 7229.00 frames. ], tot_loss[loss=4.896, NarTop10Accuracy=0.3339, over 6006.51 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,587 INFO [trainer.py:765] (0/8) Epoch 2, batch 1900, train_loss[loss=4.81, NarTop10Accuracy=0.3594, over 5208.00 frames. ], tot_loss[loss=4.875, NarTop10Accuracy=0.3384, over 6037.71 frames. 
], batch size: 49, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (0/8) Epoch 2, batch 2000, train_loss[loss=4.661, NarTop10Accuracy=0.3837, over 6449.00 frames. ], tot_loss[loss=4.851, NarTop10Accuracy=0.3429, over 6038.78 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (0/8) Epoch 2, batch 2100, train_loss[loss=4.561, NarTop10Accuracy=0.4018, over 4970.00 frames. ], tot_loss[loss=4.856, NarTop10Accuracy=0.3421, over 6016.72 frames. ], batch size: 5, lr: 2.52e-02 +2024-08-06 07:06:30,372 INFO [trainer.py:765] (0/8) Epoch 2, batch 2200, train_loss[loss=4.835, NarTop10Accuracy=0.3473, over 7292.00 frames. ], tot_loss[loss=4.816, NarTop10Accuracy=0.3505, over 6055.14 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (0/8) Epoch 2, batch 2300, train_loss[loss=4.2, NarTop10Accuracy=0.4668, over 5777.00 frames. ], tot_loss[loss=4.802, NarTop10Accuracy=0.3537, over 6090.25 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (0/8) Epoch 2, batch 2400, train_loss[loss=4.942, NarTop10Accuracy=0.3168, over 5457.00 frames. ], tot_loss[loss=4.777, NarTop10Accuracy=0.359, over 5889.56 frames. ], batch size: 49, lr: 2.49e-02 +2024-08-06 07:07:24,788 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-5000.pt +2024-08-06 07:07:47,111 INFO [trainer.py:765] (0/8) Epoch 2, batch 2500, train_loss[loss=4.602, NarTop10Accuracy=0.3809, over 5104.00 frames. ], tot_loss[loss=4.745, NarTop10Accuracy=0.3655, over 5555.13 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:08,378 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 07:08:08,382 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-2.pt +2024-08-06 07:09:08,538 INFO [trainer.py:765] (0/8) Epoch 3, batch 100, train_loss[loss=5.007, NarTop10Accuracy=0.3196, over 7128.00 frames. ], tot_loss[loss=4.639, NarTop10Accuracy=0.3864, over 2395.52 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,499 INFO [trainer.py:765] (0/8) Epoch 3, batch 200, train_loss[loss=4.336, NarTop10Accuracy=0.4421, over 6873.00 frames. ], tot_loss[loss=4.614, NarTop10Accuracy=0.3917, over 3879.35 frames. ], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (0/8) Epoch 3, batch 300, train_loss[loss=4.443, NarTop10Accuracy=0.4248, over 7191.00 frames. ], tot_loss[loss=4.606, NarTop10Accuracy=0.3923, over 4667.32 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (0/8) Epoch 3, batch 400, train_loss[loss=4.52, NarTop10Accuracy=0.4115, over 5232.00 frames. ], tot_loss[loss=4.576, NarTop10Accuracy=0.3981, over 5119.60 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (0/8) Epoch 3, batch 500, train_loss[loss=4.636, NarTop10Accuracy=0.387, over 6175.00 frames. ], tot_loss[loss=4.585, NarTop10Accuracy=0.3967, over 5400.55 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,263 INFO [trainer.py:765] (0/8) Epoch 3, batch 600, train_loss[loss=4.497, NarTop10Accuracy=0.4153, over 5869.00 frames. ], tot_loss[loss=4.564, NarTop10Accuracy=0.4006, over 5667.42 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (0/8) Epoch 3, batch 700, train_loss[loss=4.554, NarTop10Accuracy=0.4046, over 5124.00 frames. ], tot_loss[loss=4.552, NarTop10Accuracy=0.4031, over 5751.37 frames. 
], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (0/8) Epoch 3, batch 800, train_loss[loss=4.45, NarTop10Accuracy=0.4264, over 4969.00 frames. ], tot_loss[loss=4.534, NarTop10Accuracy=0.4065, over 5803.93 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-6000.pt +2024-08-06 07:13:16,175 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (0/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (0/8) Epoch 3, batch 900, train_loss[loss=4.246, NarTop10Accuracy=0.4538, over 6706.00 frames. ], tot_loss[loss=4.517, NarTop10Accuracy=0.4102, over 5818.43 frames. ], batch size: 14, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (0/8) Epoch 3, batch 1000, train_loss[loss=4.282, NarTop10Accuracy=0.4584, over 6615.00 frames. ], tot_loss[loss=4.498, NarTop10Accuracy=0.4135, over 5924.68 frames. ], batch size: 14, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (0/8) Epoch 3, batch 1100, train_loss[loss=4.468, NarTop10Accuracy=0.4242, over 6930.00 frames. ], tot_loss[loss=4.492, NarTop10Accuracy=0.4142, over 5983.32 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,866 INFO [trainer.py:765] (0/8) Epoch 3, batch 1200, train_loss[loss=4.324, NarTop10Accuracy=0.4345, over 7263.00 frames. ], tot_loss[loss=4.481, NarTop10Accuracy=0.4166, over 5973.90 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (0/8) Epoch 3, batch 1300, train_loss[loss=4.505, NarTop10Accuracy=0.4198, over 4982.00 frames. ], tot_loss[loss=4.458, NarTop10Accuracy=0.4211, over 6023.75 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (0/8) Epoch 3, batch 1400, train_loss[loss=4.225, NarTop10Accuracy=0.4661, over 6138.00 frames. ], tot_loss[loss=4.45, NarTop10Accuracy=0.4225, over 6034.71 frames. ], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,663 INFO [trainer.py:765] (0/8) Epoch 3, batch 1500, train_loss[loss=4.618, NarTop10Accuracy=0.3886, over 6121.00 frames. ], tot_loss[loss=4.445, NarTop10Accuracy=0.4233, over 5970.62 frames. ], batch size: 49, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (0/8) Epoch 3, batch 1600, train_loss[loss=4.131, NarTop10Accuracy=0.4947, over 7237.00 frames. ], tot_loss[loss=4.42, NarTop10Accuracy=0.4283, over 5945.24 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,503 INFO [trainer.py:765] (0/8) Epoch 3, batch 1700, train_loss[loss=4.357, NarTop10Accuracy=0.4364, over 6318.00 frames. ], tot_loss[loss=4.404, NarTop10Accuracy=0.4316, over 5933.14 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,160 INFO [trainer.py:765] (0/8) Epoch 3, batch 1800, train_loss[loss=4.321, NarTop10Accuracy=0.4576, over 7219.00 frames. ], tot_loss[loss=4.386, NarTop10Accuracy=0.4348, over 5994.89 frames. 
], batch size: 22, lr: 2.17e-02 +2024-08-06 07:18:41,965 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-7000.pt +2024-08-06 07:19:01,958 INFO [trainer.py:765] (0/8) Epoch 3, batch 1900, train_loss[loss=4.678, NarTop10Accuracy=0.3898, over 5797.00 frames. ], tot_loss[loss=4.373, NarTop10Accuracy=0.4368, over 6034.36 frames. ], batch size: 49, lr: 2.16e-02 +2024-08-06 07:19:27,621 INFO [trainer.py:765] (0/8) Epoch 3, batch 2000, train_loss[loss=4.507, NarTop10Accuracy=0.4059, over 7021.00 frames. ], tot_loss[loss=4.354, NarTop10Accuracy=0.4406, over 6014.79 frames. ], batch size: 49, lr: 2.15e-02 +2024-08-06 07:19:53,070 INFO [trainer.py:765] (0/8) Epoch 3, batch 2100, train_loss[loss=4.051, NarTop10Accuracy=0.5002, over 3993.00 frames. ], tot_loss[loss=4.332, NarTop10Accuracy=0.4447, over 5994.85 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,553 INFO [trainer.py:765] (0/8) Epoch 3, batch 2200, train_loss[loss=4.518, NarTop10Accuracy=0.4067, over 7138.00 frames. ], tot_loss[loss=4.32, NarTop10Accuracy=0.4478, over 6027.39 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (0/8) Epoch 3, batch 2300, train_loss[loss=3.863, NarTop10Accuracy=0.539, over 5800.00 frames. ], tot_loss[loss=4.315, NarTop10Accuracy=0.449, over 6060.69 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,677 INFO [trainer.py:765] (0/8) Epoch 3, batch 2400, train_loss[loss=4.235, NarTop10Accuracy=0.4759, over 6444.00 frames. ], tot_loss[loss=4.303, NarTop10Accuracy=0.4513, over 5891.07 frames. ], batch size: 50, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (0/8) Epoch 3, batch 2500, train_loss[loss=4.417, NarTop10Accuracy=0.4266, over 5040.00 frames. ], tot_loss[loss=4.256, NarTop10Accuracy=0.4605, over 5543.46 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,322 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 07:21:53,325 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-3.pt +2024-08-06 07:23:00,978 INFO [trainer.py:765] (0/8) Epoch 4, batch 100, train_loss[loss=4.225, NarTop10Accuracy=0.4607, over 7485.00 frames. ], tot_loss[loss=4.209, NarTop10Accuracy=0.4717, over 2378.70 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,304 INFO [trainer.py:765] (0/8) Epoch 4, batch 200, train_loss[loss=4.236, NarTop10Accuracy=0.4627, over 6966.00 frames. ], tot_loss[loss=4.189, NarTop10Accuracy=0.4754, over 3867.02 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-8000.pt +2024-08-06 07:23:55,118 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:24:01,516 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. +2024-08-06 07:24:01,517 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,361 INFO [trainer.py:765] (0/8) Epoch 4, batch 300, train_loss[loss=4.035, NarTop10Accuracy=0.5032, over 7167.00 frames. ], tot_loss[loss=4.19, NarTop10Accuracy=0.4757, over 4676.63 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (0/8) Epoch 4, batch 400, train_loss[loss=3.847, NarTop10Accuracy=0.5487, over 5217.00 frames. 
], tot_loss[loss=4.184, NarTop10Accuracy=0.4764, over 5131.72 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (0/8) Epoch 4, batch 500, train_loss[loss=4.022, NarTop10Accuracy=0.5057, over 6180.00 frames. ], tot_loss[loss=4.164, NarTop10Accuracy=0.48, over 5422.50 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (0/8) Epoch 4, batch 600, train_loss[loss=3.766, NarTop10Accuracy=0.5386, over 5705.00 frames. ], tot_loss[loss=4.151, NarTop10Accuracy=0.482, over 5686.48 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (0/8) Epoch 4, batch 700, train_loss[loss=4.5, NarTop10Accuracy=0.4179, over 5033.00 frames. ], tot_loss[loss=4.154, NarTop10Accuracy=0.4814, over 5746.74 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,433 INFO [trainer.py:765] (0/8) Epoch 4, batch 800, train_loss[loss=3.995, NarTop10Accuracy=0.5081, over 5076.00 frames. ], tot_loss[loss=4.15, NarTop10Accuracy=0.4826, over 5798.88 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,042 INFO [trainer.py:765] (0/8) Epoch 4, batch 900, train_loss[loss=4.08, NarTop10Accuracy=0.4962, over 6214.00 frames. ], tot_loss[loss=4.12, NarTop10Accuracy=0.489, over 5824.43 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (0/8) Epoch 4, batch 1000, train_loss[loss=3.86, NarTop10Accuracy=0.5365, over 6180.00 frames. ], tot_loss[loss=4.116, NarTop10Accuracy=0.49, over 5923.54 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,071 INFO [trainer.py:765] (0/8) Epoch 4, batch 1100, train_loss[loss=3.716, NarTop10Accuracy=0.5732, over 6826.00 frames. ], tot_loss[loss=4.112, NarTop10Accuracy=0.4907, over 5955.32 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,599 INFO [trainer.py:765] (0/8) Epoch 4, batch 1200, train_loss[loss=4.136, NarTop10Accuracy=0.489, over 7319.00 frames. ], tot_loss[loss=4.109, NarTop10Accuracy=0.491, over 5931.23 frames. ], batch size: 31, lr: 1.87e-02 +2024-08-06 07:29:45,948 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-9000.pt +2024-08-06 07:30:04,991 INFO [trainer.py:765] (0/8) Epoch 4, batch 1300, train_loss[loss=3.691, NarTop10Accuracy=0.5897, over 5150.00 frames. ], tot_loss[loss=4.075, NarTop10Accuracy=0.498, over 6010.56 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,380 INFO [trainer.py:765] (0/8) Epoch 4, batch 1400, train_loss[loss=3.82, NarTop10Accuracy=0.5404, over 6030.00 frames. ], tot_loss[loss=4.069, NarTop10Accuracy=0.4988, over 6018.05 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,831 INFO [trainer.py:765] (0/8) Epoch 4, batch 1500, train_loss[loss=4.153, NarTop10Accuracy=0.4855, over 5703.00 frames. ], tot_loss[loss=4.06, NarTop10Accuracy=0.5003, over 5955.79 frames. ], batch size: 50, lr: 1.85e-02 +2024-08-06 07:31:39,961 INFO [trainer.py:765] (0/8) Epoch 4, batch 1600, train_loss[loss=4.31, NarTop10Accuracy=0.4543, over 7170.00 frames. ], tot_loss[loss=4.07, NarTop10Accuracy=0.4984, over 5953.83 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,855 INFO [trainer.py:765] (0/8) Epoch 4, batch 1700, train_loss[loss=4.332, NarTop10Accuracy=0.4537, over 6176.00 frames. ], tot_loss[loss=4.048, NarTop10Accuracy=0.5027, over 5940.33 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (0/8) Epoch 4, batch 1800, train_loss[loss=4.251, NarTop10Accuracy=0.4669, over 7138.00 frames. 
], tot_loss[loss=4.043, NarTop10Accuracy=0.5035, over 6000.80 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,194 INFO [trainer.py:765] (0/8) Epoch 4, batch 1900, train_loss[loss=4.237, NarTop10Accuracy=0.4618, over 5634.00 frames. ], tot_loss[loss=4.054, NarTop10Accuracy=0.5013, over 6026.72 frames. ], batch size: 49, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (0/8) Epoch 4, batch 2000, train_loss[loss=4.368, NarTop10Accuracy=0.4382, over 6237.00 frames. ], tot_loss[loss=4.032, NarTop10Accuracy=0.5057, over 6011.95 frames. ], batch size: 51, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (0/8) Epoch 4, batch 2100, train_loss[loss=4.003, NarTop10Accuracy=0.5062, over 4663.00 frames. ], tot_loss[loss=4.022, NarTop10Accuracy=0.5082, over 5984.70 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (0/8) Epoch 4, batch 2200, train_loss[loss=4.317, NarTop10Accuracy=0.453, over 7218.00 frames. ], tot_loss[loss=4.032, NarTop10Accuracy=0.5061, over 6035.10 frames. ], batch size: 30, lr: 1.80e-02 +2024-08-06 07:34:31,430 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-10000.pt +2024-08-06 07:34:34,952 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (0/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (0/8) Epoch 4, batch 2300, train_loss[loss=3.907, NarTop10Accuracy=0.5275, over 5759.00 frames. ], tot_loss[loss=4.036, NarTop10Accuracy=0.5062, over 6060.62 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,166 INFO [trainer.py:765] (0/8) Epoch 4, batch 2400, train_loss[loss=3.878, NarTop10Accuracy=0.5344, over 5245.00 frames. ], tot_loss[loss=4.025, NarTop10Accuracy=0.5084, over 5877.31 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,623 INFO [trainer.py:765] (0/8) Epoch 4, batch 2500, train_loss[loss=4.214, NarTop10Accuracy=0.475, over 5089.00 frames. ], tot_loss[loss=4.008, NarTop10Accuracy=0.5124, over 5534.14 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,729 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 07:36:01,732 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-4.pt +2024-08-06 07:37:02,524 INFO [trainer.py:765] (0/8) Epoch 5, batch 100, train_loss[loss=4.023, NarTop10Accuracy=0.5128, over 7185.00 frames. ], tot_loss[loss=3.96, NarTop10Accuracy=0.5236, over 2380.81 frames. ], batch size: 30, lr: 1.66e-02 +2024-08-06 07:37:39,815 INFO [trainer.py:765] (0/8) Epoch 5, batch 200, train_loss[loss=4.217, NarTop10Accuracy=0.4781, over 6633.00 frames. ], tot_loss[loss=3.951, NarTop10Accuracy=0.524, over 3870.44 frames. ], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (0/8) Epoch 5, batch 300, train_loss[loss=4.154, NarTop10Accuracy=0.4822, over 7207.00 frames. ], tot_loss[loss=3.926, NarTop10Accuracy=0.5293, over 4686.12 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (0/8) Epoch 5, batch 400, train_loss[loss=3.81, NarTop10Accuracy=0.549, over 5116.00 frames. ], tot_loss[loss=3.924, NarTop10Accuracy=0.5296, over 5130.33 frames. 
], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (0/8) Epoch 5, batch 500, train_loss[loss=3.741, NarTop10Accuracy=0.5683, over 6048.00 frames. ], tot_loss[loss=3.93, NarTop10Accuracy=0.5286, over 5414.03 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,944 INFO [trainer.py:765] (0/8) Epoch 5, batch 600, train_loss[loss=3.898, NarTop10Accuracy=0.5393, over 5757.00 frames. ], tot_loss[loss=3.916, NarTop10Accuracy=0.5314, over 5674.01 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:18,576 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-11000.pt +2024-08-06 07:40:28,626 INFO [trainer.py:765] (0/8) Epoch 5, batch 700, train_loss[loss=3.796, NarTop10Accuracy=0.5604, over 5058.00 frames. ], tot_loss[loss=3.911, NarTop10Accuracy=0.5322, over 5738.73 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,367 INFO [trainer.py:765] (0/8) Epoch 5, batch 800, train_loss[loss=4.414, NarTop10Accuracy=0.43, over 4948.00 frames. ], tot_loss[loss=3.912, NarTop10Accuracy=0.5317, over 5800.87 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,938 INFO [trainer.py:765] (0/8) Epoch 5, batch 900, train_loss[loss=4.103, NarTop10Accuracy=0.4947, over 6739.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5343, over 5821.68 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 07:42:13,846 INFO [trainer.py:765] (0/8) Epoch 5, batch 1000, train_loss[loss=3.994, NarTop10Accuracy=0.5065, over 6303.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5355, over 5936.81 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (0/8) Epoch 5, batch 1100, train_loss[loss=3.841, NarTop10Accuracy=0.5297, over 6847.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5343, over 5953.53 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (0/8) Epoch 5, batch 1200, train_loss[loss=4.162, NarTop10Accuracy=0.4769, over 7097.00 frames. ], tot_loss[loss=3.914, NarTop10Accuracy=0.5312, over 5946.32 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,557 INFO [trainer.py:765] (0/8) Epoch 5, batch 1300, train_loss[loss=3.749, NarTop10Accuracy=0.5476, over 5044.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.5333, over 6014.80 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (0/8) Epoch 5, batch 1400, train_loss[loss=4.027, NarTop10Accuracy=0.5139, over 6042.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5344, over 6043.64 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (0/8) Epoch 5, batch 1500, train_loss[loss=3.874, NarTop10Accuracy=0.5432, over 6027.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5341, over 5977.47 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (0/8) Epoch 5, batch 1600, train_loss[loss=4.071, NarTop10Accuracy=0.5072, over 7257.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5335, over 5965.25 frames. ], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,057 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-12000.pt +2024-08-06 07:45:54,929 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (0/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. 
+2024-08-06 07:46:01,622 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:46:02,122 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (0/8) Epoch 5, batch 1700, train_loss[loss=3.812, NarTop10Accuracy=0.5525, over 6171.00 frames. ], tot_loss[loss=3.91, NarTop10Accuracy=0.5327, over 5935.53 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,966 INFO [trainer.py:765] (0/8) Epoch 5, batch 1800, train_loss[loss=3.871, NarTop10Accuracy=0.5512, over 6856.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5361, over 6007.50 frames. ], batch size: 21, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (0/8) Epoch 5, batch 1900, train_loss[loss=3.793, NarTop10Accuracy=0.5572, over 6095.00 frames. ], tot_loss[loss=3.903, NarTop10Accuracy=0.534, over 6052.75 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:27,146 INFO [trainer.py:765] (0/8) Epoch 5, batch 2000, train_loss[loss=3.932, NarTop10Accuracy=0.5216, over 5586.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5349, over 6022.66 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:52,618 INFO [trainer.py:765] (0/8) Epoch 5, batch 2100, train_loss[loss=3.834, NarTop10Accuracy=0.5505, over 4008.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5349, over 6000.46 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,992 INFO [trainer.py:765] (0/8) Epoch 5, batch 2200, train_loss[loss=3.933, NarTop10Accuracy=0.5267, over 7032.00 frames. ], tot_loss[loss=3.888, NarTop10Accuracy=0.537, over 6049.62 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (0/8) Epoch 5, batch 2300, train_loss[loss=3.987, NarTop10Accuracy=0.5127, over 5813.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5363, over 6070.35 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (0/8) Epoch 5, batch 2400, train_loss[loss=3.91, NarTop10Accuracy=0.5391, over 6175.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5375, over 5894.74 frames. ], batch size: 49, lr: 1.53e-02 +2024-08-06 07:49:31,644 INFO [trainer.py:765] (0/8) Epoch 5, batch 2500, train_loss[loss=3.758, NarTop10Accuracy=0.5574, over 5077.00 frames. ], tot_loss[loss=3.851, NarTop10Accuracy=0.5444, over 5534.22 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,739 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 07:49:53,743 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-5.pt +2024-08-06 07:50:52,546 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-13000.pt +2024-08-06 07:50:58,969 INFO [trainer.py:765] (0/8) Epoch 6, batch 100, train_loss[loss=3.766, NarTop10Accuracy=0.5628, over 7183.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5584, over 2387.39 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,789 INFO [trainer.py:765] (0/8) Epoch 6, batch 200, train_loss[loss=3.693, NarTop10Accuracy=0.5765, over 6813.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.5565, over 3882.51 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (0/8) Epoch 6, batch 300, train_loss[loss=3.572, NarTop10Accuracy=0.601, over 7170.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.5577, over 4672.39 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (0/8) Epoch 6, batch 400, train_loss[loss=3.608, NarTop10Accuracy=0.5833, over 5201.00 frames. ], tot_loss[loss=3.788, NarTop10Accuracy=0.5575, over 5135.26 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,103 INFO [trainer.py:765] (0/8) Epoch 6, batch 500, train_loss[loss=3.798, NarTop10Accuracy=0.5516, over 6246.00 frames. ], tot_loss[loss=3.777, NarTop10Accuracy=0.5601, over 5398.67 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,286 INFO [trainer.py:765] (0/8) Epoch 6, batch 600, train_loss[loss=3.62, NarTop10Accuracy=0.5927, over 5724.00 frames. ], tot_loss[loss=3.78, NarTop10Accuracy=0.5599, over 5663.40 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,439 INFO [trainer.py:765] (0/8) Epoch 6, batch 700, train_loss[loss=4.058, NarTop10Accuracy=0.5069, over 4982.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.5591, over 5743.36 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (0/8) Epoch 6, batch 800, train_loss[loss=3.829, NarTop10Accuracy=0.5426, over 5141.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5565, over 5804.15 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (0/8) Epoch 6, batch 900, train_loss[loss=3.552, NarTop10Accuracy=0.6199, over 6370.00 frames. ], tot_loss[loss=3.782, NarTop10Accuracy=0.5581, over 5822.09 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (0/8) Epoch 6, batch 1000, train_loss[loss=3.55, NarTop10Accuracy=0.6039, over 6241.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5542, over 5924.05 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-14000.pt +2024-08-06 07:56:38,061 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (0/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (0/8) Epoch 6, batch 1100, train_loss[loss=3.639, NarTop10Accuracy=0.5988, over 7013.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5545, over 5947.35 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (0/8) Epoch 6, batch 1200, train_loss[loss=4.046, NarTop10Accuracy=0.5026, over 7308.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.555, over 5935.69 frames. ], batch size: 30, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (0/8) Epoch 6, batch 1300, train_loss[loss=3.539, NarTop10Accuracy=0.599, over 4821.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5559, over 5989.63 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (0/8) Epoch 6, batch 1400, train_loss[loss=4.025, NarTop10Accuracy=0.4994, over 6203.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5552, over 6022.36 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,998 INFO [trainer.py:765] (0/8) Epoch 6, batch 1500, train_loss[loss=4.182, NarTop10Accuracy=0.4805, over 6020.00 frames. ], tot_loss[loss=3.808, NarTop10Accuracy=0.5535, over 5957.77 frames. 
], batch size: 48, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (0/8) Epoch 6, batch 1600, train_loss[loss=3.648, NarTop10Accuracy=0.5844, over 7129.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.556, over 5949.86 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (0/8) Epoch 6, batch 1700, train_loss[loss=3.841, NarTop10Accuracy=0.5529, over 6668.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.558, over 5938.53 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (0/8) Epoch 6, batch 1800, train_loss[loss=3.951, NarTop10Accuracy=0.5256, over 7145.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5594, over 6004.87 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,794 INFO [trainer.py:765] (0/8) Epoch 6, batch 1900, train_loss[loss=4.029, NarTop10Accuracy=0.505, over 6344.00 frames. ], tot_loss[loss=3.82, NarTop10Accuracy=0.5518, over 6054.42 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (0/8) Epoch 6, batch 2000, train_loss[loss=3.903, NarTop10Accuracy=0.5459, over 6469.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5551, over 6020.45 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 08:01:38,209 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-15000.pt +2024-08-06 08:01:43,133 INFO [trainer.py:765] (0/8) Epoch 6, batch 2100, train_loss[loss=3.615, NarTop10Accuracy=0.5819, over 4832.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5559, over 5996.85 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (0/8) Epoch 6, batch 2200, train_loss[loss=3.706, NarTop10Accuracy=0.5771, over 7176.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.555, over 6039.11 frames. ], batch size: 30, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (0/8) Epoch 6, batch 2300, train_loss[loss=3.911, NarTop10Accuracy=0.538, over 5772.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5552, over 6081.85 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (0/8) Epoch 6, batch 2400, train_loss[loss=3.485, NarTop10Accuracy=0.5952, over 5157.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.5556, over 5906.70 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (0/8) Epoch 6, batch 2500, train_loss[loss=3.944, NarTop10Accuracy=0.5241, over 5126.00 frames. ], tot_loss[loss=3.782, NarTop10Accuracy=0.5587, over 5553.33 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:43,317 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 08:03:43,319 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-6.pt +2024-08-06 08:04:42,817 INFO [trainer.py:765] (0/8) Epoch 7, batch 100, train_loss[loss=3.71, NarTop10Accuracy=0.5733, over 7101.00 frames. ], tot_loss[loss=3.68, NarTop10Accuracy=0.5808, over 2384.29 frames. ], batch size: 31, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (0/8) Epoch 7, batch 200, train_loss[loss=4.046, NarTop10Accuracy=0.5141, over 6905.00 frames. ], tot_loss[loss=3.693, NarTop10Accuracy=0.5778, over 3882.60 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (0/8) Epoch 7, batch 300, train_loss[loss=3.413, NarTop10Accuracy=0.637, over 7017.00 frames. ], tot_loss[loss=3.708, NarTop10Accuracy=0.5749, over 4679.21 frames. 
], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (0/8) Epoch 7, batch 400, train_loss[loss=3.999, NarTop10Accuracy=0.5076, over 5019.00 frames. ], tot_loss[loss=3.714, NarTop10Accuracy=0.5732, over 5132.09 frames. ], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,315 INFO [trainer.py:765] (0/8) Epoch 7, batch 500, train_loss[loss=3.674, NarTop10Accuracy=0.5838, over 6192.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5736, over 5418.25 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,085 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-16000.pt +2024-08-06 08:06:59,575 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (0/8) Epoch 7, batch 600, train_loss[loss=3.501, NarTop10Accuracy=0.6139, over 5778.00 frames. ], tot_loss[loss=3.714, NarTop10Accuracy=0.5733, over 5680.18 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (0/8) Epoch 7, batch 700, train_loss[loss=3.457, NarTop10Accuracy=0.6335, over 5219.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5722, over 5742.75 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (0/8) Epoch 7, batch 800, train_loss[loss=3.584, NarTop10Accuracy=0.6009, over 5006.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5745, over 5817.44 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (0/8) Epoch 7, batch 900, train_loss[loss=3.603, NarTop10Accuracy=0.5988, over 6320.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5729, over 5828.88 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,191 INFO [trainer.py:765] (0/8) Epoch 7, batch 1000, train_loss[loss=3.798, NarTop10Accuracy=0.553, over 6254.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.573, over 5931.26 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (0/8) Epoch 7, batch 1100, train_loss[loss=3.548, NarTop10Accuracy=0.6054, over 6857.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5727, over 5942.19 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (0/8) Epoch 7, batch 1200, train_loss[loss=3.97, NarTop10Accuracy=0.5151, over 7255.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5729, over 5938.27 frames. ], batch size: 32, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (0/8) Epoch 7, batch 1300, train_loss[loss=3.198, NarTop10Accuracy=0.6754, over 4914.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5722, over 6008.26 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (0/8) Epoch 7, batch 1400, train_loss[loss=3.559, NarTop10Accuracy=0.6001, over 6241.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.572, over 6036.16 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (0/8) Epoch 7, batch 1500, train_loss[loss=3.78, NarTop10Accuracy=0.5628, over 5622.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.5721, over 5979.06 frames. 
], batch size: 48, lr: 1.19e-02 +2024-08-06 08:12:45,668 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-17000.pt +2024-08-06 08:13:13,237 INFO [trainer.py:765] (0/8) Epoch 7, batch 1600, train_loss[loss=3.596, NarTop10Accuracy=0.5943, over 6905.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5726, over 5954.22 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (0/8) Epoch 7, batch 1700, train_loss[loss=3.771, NarTop10Accuracy=0.5594, over 6304.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5711, over 5943.68 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 08:14:06,584 INFO [trainer.py:765] (0/8) Epoch 7, batch 1800, train_loss[loss=3.773, NarTop10Accuracy=0.5594, over 7108.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5702, over 6021.24 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (0/8) Epoch 7, batch 1900, train_loss[loss=4.128, NarTop10Accuracy=0.4834, over 6151.00 frames. ], tot_loss[loss=3.732, NarTop10Accuracy=0.5693, over 6051.65 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:14:58,994 INFO [trainer.py:765] (0/8) Epoch 7, batch 2000, train_loss[loss=3.703, NarTop10Accuracy=0.5799, over 6365.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5713, over 6027.13 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (0/8) Epoch 7, batch 2100, train_loss[loss=3.872, NarTop10Accuracy=0.5542, over 3951.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5706, over 6003.19 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (0/8) Epoch 7, batch 2200, train_loss[loss=3.922, NarTop10Accuracy=0.5381, over 7523.00 frames. ], tot_loss[loss=3.746, NarTop10Accuracy=0.5667, over 6046.09 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (0/8) Epoch 7, batch 2300, train_loss[loss=4.085, NarTop10Accuracy=0.5026, over 5875.00 frames. ], tot_loss[loss=3.742, NarTop10Accuracy=0.5673, over 6073.98 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (0/8) Epoch 7, batch 2400, train_loss[loss=4.011, NarTop10Accuracy=0.521, over 6125.00 frames. ], tot_loss[loss=3.739, NarTop10Accuracy=0.5678, over 5886.93 frames. ], batch size: 49, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (0/8) Epoch 7, batch 2500, train_loss[loss=3.48, NarTop10Accuracy=0.6075, over 5034.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5717, over 5548.56 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,843 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-18000.pt +2024-08-06 08:17:10,507 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:17:17,432 INFO [trainer.py:811] (0/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 29794MB +2024-08-06 08:17:17,901 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,456 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 08:17:35,459 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-7.pt +2024-08-06 08:18:36,194 INFO [trainer.py:765] (0/8) Epoch 8, batch 100, train_loss[loss=3.696, NarTop10Accuracy=0.5774, over 7227.00 frames. 
], tot_loss[loss=3.662, NarTop10Accuracy=0.5843, over 2373.95 frames. ], batch size: 31, lr: 1.09e-02 +2024-08-06 08:19:15,020 INFO [trainer.py:765] (0/8) Epoch 8, batch 200, train_loss[loss=3.474, NarTop10Accuracy=0.616, over 6771.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5828, over 3857.37 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (0/8) Epoch 8, batch 300, train_loss[loss=3.674, NarTop10Accuracy=0.5813, over 7306.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5855, over 4678.62 frames. ], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,269 INFO [trainer.py:765] (0/8) Epoch 8, batch 400, train_loss[loss=3.518, NarTop10Accuracy=0.6075, over 5025.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5852, over 5137.89 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,421 INFO [trainer.py:765] (0/8) Epoch 8, batch 500, train_loss[loss=3.312, NarTop10Accuracy=0.6541, over 6240.00 frames. ], tot_loss[loss=3.653, NarTop10Accuracy=0.5862, over 5401.92 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (0/8) Epoch 8, batch 600, train_loss[loss=3.518, NarTop10Accuracy=0.6197, over 5738.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5839, over 5667.19 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (0/8) Epoch 8, batch 700, train_loss[loss=3.987, NarTop10Accuracy=0.5082, over 4878.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5817, over 5750.33 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,342 INFO [trainer.py:765] (0/8) Epoch 8, batch 800, train_loss[loss=3.516, NarTop10Accuracy=0.6262, over 4999.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5819, over 5795.84 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (0/8) Epoch 8, batch 900, train_loss[loss=3.176, NarTop10Accuracy=0.6618, over 6355.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5835, over 5817.36 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:16,216 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-19000.pt +2024-08-06 08:23:42,943 INFO [trainer.py:765] (0/8) Epoch 8, batch 1000, train_loss[loss=3.636, NarTop10Accuracy=0.5943, over 6188.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5835, over 5912.06 frames. ], batch size: 13, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (0/8) Epoch 8, batch 1100, train_loss[loss=3.734, NarTop10Accuracy=0.5818, over 6773.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5836, over 5936.16 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,340 INFO [trainer.py:765] (0/8) Epoch 8, batch 1200, train_loss[loss=3.464, NarTop10Accuracy=0.6215, over 7169.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5825, over 5944.29 frames. ], batch size: 30, lr: 1.06e-02 +2024-08-06 08:25:26,605 INFO [trainer.py:765] (0/8) Epoch 8, batch 1300, train_loss[loss=3.823, NarTop10Accuracy=0.5542, over 5229.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5833, over 6001.06 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,605 INFO [trainer.py:765] (0/8) Epoch 8, batch 1400, train_loss[loss=3.892, NarTop10Accuracy=0.551, over 6098.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5794, over 6028.09 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (0/8) Epoch 8, batch 1500, train_loss[loss=3.722, NarTop10Accuracy=0.5765, over 6464.00 frames. 
], tot_loss[loss=3.677, NarTop10Accuracy=0.5802, over 5958.48 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (0/8) Epoch 8, batch 1600, train_loss[loss=3.731, NarTop10Accuracy=0.5692, over 7239.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.5804, over 5946.21 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (0/8) Epoch 8, batch 1700, train_loss[loss=3.467, NarTop10Accuracy=0.6268, over 6320.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5816, over 5937.85 frames. ], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,462 INFO [trainer.py:765] (0/8) Epoch 8, batch 1800, train_loss[loss=3.611, NarTop10Accuracy=0.5911, over 7052.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5834, over 5994.76 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,181 INFO [trainer.py:765] (0/8) Epoch 8, batch 1900, train_loss[loss=4.195, NarTop10Accuracy=0.4752, over 6141.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5843, over 6023.73 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:28:25,164 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-20000.pt +2024-08-06 08:28:28,748 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (0/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 08:28:35,795 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,983 INFO [trainer.py:765] (0/8) Epoch 8, batch 2000, train_loss[loss=3.849, NarTop10Accuracy=0.5466, over 6268.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5838, over 6010.19 frames. ], batch size: 48, lr: 1.04e-02 +2024-08-06 08:29:18,485 INFO [trainer.py:765] (0/8) Epoch 8, batch 2100, train_loss[loss=3.569, NarTop10Accuracy=0.5995, over 4763.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.583, over 6022.36 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 08:29:43,790 INFO [trainer.py:765] (0/8) Epoch 8, batch 2200, train_loss[loss=3.836, NarTop10Accuracy=0.5446, over 7488.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5833, over 6063.11 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (0/8) Epoch 8, batch 2300, train_loss[loss=3.471, NarTop10Accuracy=0.6154, over 5858.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5824, over 6080.13 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,791 INFO [trainer.py:765] (0/8) Epoch 8, batch 2400, train_loss[loss=3.797, NarTop10Accuracy=0.565, over 6106.00 frames. ], tot_loss[loss=3.684, NarTop10Accuracy=0.5793, over 5879.89 frames. ], batch size: 49, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (0/8) Epoch 8, batch 2500, train_loss[loss=3.598, NarTop10Accuracy=0.5972, over 4948.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5824, over 5547.01 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,653 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 08:31:18,660 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-8.pt +2024-08-06 08:32:19,098 INFO [trainer.py:765] (0/8) Epoch 9, batch 100, train_loss[loss=3.86, NarTop10Accuracy=0.5517, over 7244.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5953, over 2383.37 frames. 
], batch size: 31, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (0/8) Epoch 9, batch 200, train_loss[loss=3.394, NarTop10Accuracy=0.6332, over 6838.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6004, over 3857.41 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (0/8) Epoch 9, batch 300, train_loss[loss=3.586, NarTop10Accuracy=0.5872, over 7265.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.6002, over 4673.13 frames. ], batch size: 23, lr: 9.67e-03 +2024-08-06 08:33:44,054 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-21000.pt +2024-08-06 08:34:00,964 INFO [trainer.py:765] (0/8) Epoch 9, batch 400, train_loss[loss=3.459, NarTop10Accuracy=0.6349, over 5188.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6025, over 5123.22 frames. ], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (0/8) Epoch 9, batch 500, train_loss[loss=3.84, NarTop10Accuracy=0.5522, over 6004.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6031, over 5425.79 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (0/8) Epoch 9, batch 600, train_loss[loss=3.64, NarTop10Accuracy=0.5954, over 5720.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6017, over 5681.69 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (0/8) Epoch 9, batch 700, train_loss[loss=3.922, NarTop10Accuracy=0.5215, over 4245.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.5986, over 5741.95 frames. ], batch size: 5, lr: 9.58e-03 +2024-08-06 08:36:14,822 INFO [trainer.py:765] (0/8) Epoch 9, batch 800, train_loss[loss=3.575, NarTop10Accuracy=0.6104, over 4300.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5941, over 5816.00 frames. ], batch size: 5, lr: 9.56e-03 +2024-08-06 08:36:46,455 INFO [trainer.py:765] (0/8) Epoch 9, batch 900, train_loss[loss=3.455, NarTop10Accuracy=0.6297, over 6230.00 frames. ], tot_loss[loss=3.619, NarTop10Accuracy=0.5931, over 5833.15 frames. ], batch size: 13, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (0/8) Epoch 9, batch 1000, train_loss[loss=3.451, NarTop10Accuracy=0.6105, over 6207.00 frames. ], tot_loss[loss=3.623, NarTop10Accuracy=0.5917, over 5924.30 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,421 INFO [trainer.py:765] (0/8) Epoch 9, batch 1100, train_loss[loss=3.868, NarTop10Accuracy=0.5473, over 6856.00 frames. ], tot_loss[loss=3.64, NarTop10Accuracy=0.5886, over 5971.38 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (0/8) Epoch 9, batch 1200, train_loss[loss=3.725, NarTop10Accuracy=0.5711, over 7134.00 frames. ], tot_loss[loss=3.644, NarTop10Accuracy=0.5878, over 5945.82 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,841 INFO [trainer.py:765] (0/8) Epoch 9, batch 1300, train_loss[loss=3.837, NarTop10Accuracy=0.5482, over 5159.00 frames. ], tot_loss[loss=3.64, NarTop10Accuracy=0.5879, over 6022.17 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,116 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-22000.pt +2024-08-06 08:39:30,597 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (0/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. 
+2024-08-06 08:39:38,197 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (0/8) Epoch 9, batch 1400, train_loss[loss=3.58, NarTop10Accuracy=0.6019, over 6107.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5901, over 6032.03 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (0/8) Epoch 9, batch 1500, train_loss[loss=3.918, NarTop10Accuracy=0.5263, over 6152.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5898, over 5972.55 frames. ], batch size: 50, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (0/8) Epoch 9, batch 1600, train_loss[loss=3.872, NarTop10Accuracy=0.5455, over 7338.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5898, over 5959.86 frames. ], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (0/8) Epoch 9, batch 1700, train_loss[loss=3.459, NarTop10Accuracy=0.6202, over 6248.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.5884, over 5939.35 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (0/8) Epoch 9, batch 1800, train_loss[loss=4.052, NarTop10Accuracy=0.4983, over 7041.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5898, over 5998.82 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,496 INFO [trainer.py:765] (0/8) Epoch 9, batch 1900, train_loss[loss=3.715, NarTop10Accuracy=0.5691, over 5946.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.5893, over 6036.97 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (0/8) Epoch 9, batch 2000, train_loss[loss=3.916, NarTop10Accuracy=0.5382, over 6131.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.5883, over 6009.19 frames. ], batch size: 50, lr: 9.31e-03 +2024-08-06 08:43:01,668 INFO [trainer.py:765] (0/8) Epoch 9, batch 2100, train_loss[loss=3.36, NarTop10Accuracy=0.6465, over 3969.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5903, over 5987.78 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,179 INFO [trainer.py:765] (0/8) Epoch 9, batch 2200, train_loss[loss=3.549, NarTop10Accuracy=0.6043, over 7247.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.589, over 6034.43 frames. ], batch size: 30, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (0/8) Epoch 9, batch 2300, train_loss[loss=3.575, NarTop10Accuracy=0.5945, over 5779.00 frames. ], tot_loss[loss=3.655, NarTop10Accuracy=0.5856, over 6074.03 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:05,503 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-23000.pt +2024-08-06 08:44:20,550 INFO [trainer.py:765] (0/8) Epoch 9, batch 2400, train_loss[loss=3.688, NarTop10Accuracy=0.5842, over 6015.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5847, over 5903.28 frames. ], batch size: 49, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (0/8) Epoch 9, batch 2500, train_loss[loss=3.972, NarTop10Accuracy=0.5312, over 4333.00 frames. ], tot_loss[loss=3.624, NarTop10Accuracy=0.5912, over 5553.37 frames. ], batch size: 5, lr: 9.22e-03 +2024-08-06 08:45:05,295 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
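Note on the optim.py:386 entries interleaved above: the five "grad-norm quartiles" are percentiles (min/25%/median/75%/max) of recent per-batch gradient norms, and in every such entry in this log the reported threshold is Clipping_scale (2.0) times the middle value (e.g. 2.0 * 1.970e+02 = 3.940e+02 directly above), with percent-clipped the share of recent batches whose norm exceeded that threshold. The sketch below only reproduces this reported bookkeeping as an illustration; it is not the optimizer's actual clipping code, and the window size and class name are assumptions.

import numpy as np
from collections import deque

class GradNormStats:
    # Generic illustration of the logged statistics: five percentiles of
    # recent gradient norms, a threshold of clipping_scale * median, and
    # the percentage of recent norms that exceeded that threshold.
    def __init__(self, clipping_scale: float = 2.0, window: int = 128):  # window is an assumed value
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=window)

    def update(self, grad_norm: float) -> None:
        self.norms.append(grad_norm)

    def summary(self) -> str:
        norms = np.array(self.norms)
        q = np.percentile(norms, [0, 25, 50, 75, 100])
        threshold = self.clipping_scale * q[2]          # 2.0 * median, as seen in the log
        pct = 100.0 * float((norms > threshold).mean())
        quartiles = " ".join(f"{v:.3e}" for v in q)
        return (f"Clipping_scale={self.clipping_scale}, grad-norm quartiles {quartiles}, "
                f"threshold={threshold:.3e}, percent-clipped={pct:.1f}")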
+2024-08-06 08:45:05,298 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-9.pt +2024-08-06 08:46:09,064 INFO [trainer.py:765] (0/8) Epoch 10, batch 100, train_loss[loss=3.477, NarTop10Accuracy=0.6214, over 7408.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5995, over 2351.73 frames. ], batch size: 31, lr: 8.75e-03 +2024-08-06 08:46:44,075 INFO [trainer.py:765] (0/8) Epoch 10, batch 200, train_loss[loss=3.558, NarTop10Accuracy=0.6102, over 6961.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6026, over 3849.94 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (0/8) Epoch 10, batch 300, train_loss[loss=3.661, NarTop10Accuracy=0.5858, over 7158.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6019, over 4654.40 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,120 INFO [trainer.py:765] (0/8) Epoch 10, batch 400, train_loss[loss=3.983, NarTop10Accuracy=0.5173, over 5216.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.6, over 5117.58 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,371 INFO [trainer.py:765] (0/8) Epoch 10, batch 500, train_loss[loss=3.296, NarTop10Accuracy=0.6624, over 5996.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6016, over 5391.67 frames. ], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,460 INFO [trainer.py:765] (0/8) Epoch 10, batch 600, train_loss[loss=3.585, NarTop10Accuracy=0.5987, over 5835.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5997, over 5650.31 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (0/8) Epoch 10, batch 700, train_loss[loss=3.334, NarTop10Accuracy=0.6538, over 5053.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5979, over 5741.09 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,164 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-24000.pt +2024-08-06 08:49:53,523 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (0/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 08:50:01,725 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (0/8) Epoch 10, batch 800, train_loss[loss=3.514, NarTop10Accuracy=0.623, over 4311.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5978, over 5785.74 frames. ], batch size: 5, lr: 8.63e-03 +2024-08-06 08:50:42,891 INFO [trainer.py:765] (0/8) Epoch 10, batch 900, train_loss[loss=3.509, NarTop10Accuracy=0.6133, over 6149.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6025, over 5802.10 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (0/8) Epoch 10, batch 1000, train_loss[loss=3.806, NarTop10Accuracy=0.5503, over 6758.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.5998, over 5903.04 frames. ], batch size: 14, lr: 8.59e-03 +2024-08-06 08:51:57,363 INFO [trainer.py:765] (0/8) Epoch 10, batch 1100, train_loss[loss=3.428, NarTop10Accuracy=0.6365, over 6821.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5958, over 5935.00 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (0/8) Epoch 10, batch 1200, train_loss[loss=3.485, NarTop10Accuracy=0.6121, over 7264.00 frames. 
], tot_loss[loss=3.6, NarTop10Accuracy=0.5959, over 5941.85 frames. ], batch size: 31, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (0/8) Epoch 10, batch 1300, train_loss[loss=3.453, NarTop10Accuracy=0.6196, over 5114.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5964, over 6019.00 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,881 INFO [trainer.py:765] (0/8) Epoch 10, batch 1400, train_loss[loss=3.609, NarTop10Accuracy=0.5969, over 6199.00 frames. ], tot_loss[loss=3.616, NarTop10Accuracy=0.5928, over 6033.05 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (0/8) Epoch 10, batch 1500, train_loss[loss=3.629, NarTop10Accuracy=0.5957, over 6407.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.5947, over 5977.42 frames. ], batch size: 48, lr: 8.51e-03 +2024-08-06 08:54:45,526 INFO [trainer.py:765] (0/8) Epoch 10, batch 1600, train_loss[loss=3.567, NarTop10Accuracy=0.6043, over 7212.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5942, over 5955.29 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,300 INFO [trainer.py:765] (0/8) Epoch 10, batch 1700, train_loss[loss=3.432, NarTop10Accuracy=0.625, over 6419.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5942, over 5947.06 frames. ], batch size: 13, lr: 8.48e-03 +2024-08-06 08:55:30,798 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-25000.pt +2024-08-06 08:55:41,989 INFO [trainer.py:765] (0/8) Epoch 10, batch 1800, train_loss[loss=3.685, NarTop10Accuracy=0.5767, over 7196.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5954, over 5993.31 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (0/8) Epoch 10, batch 1900, train_loss[loss=4.071, NarTop10Accuracy=0.5032, over 5872.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5957, over 6022.31 frames. ], batch size: 48, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (0/8) Epoch 10, batch 2000, train_loss[loss=3.722, NarTop10Accuracy=0.5667, over 6291.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5953, over 6005.84 frames. ], batch size: 48, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (0/8) Epoch 10, batch 2100, train_loss[loss=3.376, NarTop10Accuracy=0.6549, over 3931.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.5946, over 5973.46 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,279 INFO [trainer.py:765] (0/8) Epoch 10, batch 2200, train_loss[loss=3.725, NarTop10Accuracy=0.5691, over 7294.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5949, over 6029.47 frames. ], batch size: 31, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (0/8) Epoch 10, batch 2300, train_loss[loss=3.448, NarTop10Accuracy=0.6167, over 5719.00 frames. ], tot_loss[loss=3.617, NarTop10Accuracy=0.5929, over 6063.88 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,344 INFO [trainer.py:765] (0/8) Epoch 10, batch 2400, train_loss[loss=3.615, NarTop10Accuracy=0.6009, over 6453.00 frames. ], tot_loss[loss=3.622, NarTop10Accuracy=0.5921, over 5895.18 frames. ], batch size: 51, lr: 8.37e-03 +2024-08-06 08:58:38,808 INFO [trainer.py:765] (0/8) Epoch 10, batch 2500, train_loss[loss=3.404, NarTop10Accuracy=0.6459, over 5151.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5937, over 5535.09 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:59:00,223 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
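As a reading aid for the trainer.py:765 entries: each one reports the current batch's train_loss next to a smoothed tot_loss, both given "over N frames", and in this log checkpoint-*.pt files appear at every 1000th global training batch, validation over the full dev set (1907754.00 frames) at every 2000th, and an epoch-*.pt checkpoint whenever the dataloader is exhausted. One plausible reading of tot_loss is a frame-weighted running average of recent batch losses; the exact window and any decay are not recoverable from the log, so the sketch below is an assumption, not the trainer's actual metric code.

class FrameWeightedLoss:
    # Hypothetical frame-weighted running loss: each batch contributes
    # loss * num_frames, and the reported value divides by the frames
    # accumulated since the last reset.
    def __init__(self) -> None:
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, batch_loss: float, num_frames: float) -> None:
        self.loss_sum += batch_loss * num_frames
        self.frames += num_frames

    def value(self) -> float:
        return self.loss_sum / max(self.frames, 1.0)

    def reset(self) -> None:  # e.g. at each logging or validation interval
        self.loss_sum, self.frames = 0.0, 0.0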
+2024-08-06 08:59:00,226 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-10.pt +2024-08-06 09:00:03,681 INFO [trainer.py:765] (0/8) Epoch 11, batch 100, train_loss[loss=3.437, NarTop10Accuracy=0.6268, over 7166.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.611, over 2362.90 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,916 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-26000.pt +2024-08-06 09:00:34,445 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,860 INFO [trainer.py:765] (0/8) Epoch 11, batch 200, train_loss[loss=3.882, NarTop10Accuracy=0.5397, over 6881.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6121, over 3860.45 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,854 INFO [trainer.py:765] (0/8) Epoch 11, batch 300, train_loss[loss=3.279, NarTop10Accuracy=0.664, over 7183.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6106, over 4669.68 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,534 INFO [trainer.py:765] (0/8) Epoch 11, batch 400, train_loss[loss=3.23, NarTop10Accuracy=0.6779, over 5099.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6111, over 5109.41 frames. ], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,239 INFO [trainer.py:765] (0/8) Epoch 11, batch 500, train_loss[loss=3.391, NarTop10Accuracy=0.6398, over 6167.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6111, over 5394.60 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,743 INFO [trainer.py:765] (0/8) Epoch 11, batch 600, train_loss[loss=3.516, NarTop10Accuracy=0.6248, over 5799.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6097, over 5672.92 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,237 INFO [trainer.py:765] (0/8) Epoch 11, batch 700, train_loss[loss=3.289, NarTop10Accuracy=0.6565, over 5103.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6098, over 5746.87 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (0/8) Epoch 11, batch 800, train_loss[loss=3.281, NarTop10Accuracy=0.6708, over 5065.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6057, over 5799.96 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,084 INFO [trainer.py:765] (0/8) Epoch 11, batch 900, train_loss[loss=3.419, NarTop10Accuracy=0.6239, over 6649.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6077, over 5818.17 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 09:05:27,013 INFO [trainer.py:765] (0/8) Epoch 11, batch 1000, train_loss[loss=3.426, NarTop10Accuracy=0.6302, over 6308.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6069, over 5931.02 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,351 INFO [trainer.py:765] (0/8) Epoch 11, batch 1100, train_loss[loss=3.558, NarTop10Accuracy=0.6073, over 6791.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6042, over 5964.91 frames. 
], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:31,393 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-27000.pt +2024-08-06 09:06:40,946 INFO [trainer.py:765] (0/8) Epoch 11, batch 1200, train_loss[loss=3.694, NarTop10Accuracy=0.5734, over 7219.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6019, over 5962.45 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,495 INFO [trainer.py:765] (0/8) Epoch 11, batch 1300, train_loss[loss=3.566, NarTop10Accuracy=0.6176, over 5192.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6023, over 6022.95 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,629 INFO [trainer.py:765] (0/8) Epoch 11, batch 1400, train_loss[loss=3.413, NarTop10Accuracy=0.6282, over 6160.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6006, over 6029.30 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,988 INFO [trainer.py:765] (0/8) Epoch 11, batch 1500, train_loss[loss=3.576, NarTop10Accuracy=0.6082, over 6282.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.5993, over 5978.15 frames. ], batch size: 48, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (0/8) Epoch 11, batch 1600, train_loss[loss=3.532, NarTop10Accuracy=0.6103, over 7195.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6013, over 5976.97 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (0/8) Epoch 11, batch 1700, train_loss[loss=3.502, NarTop10Accuracy=0.6276, over 6447.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6016, over 5951.23 frames. ], batch size: 14, lr: 7.73e-03 +2024-08-06 09:09:40,733 INFO [trainer.py:765] (0/8) Epoch 11, batch 1800, train_loss[loss=3.691, NarTop10Accuracy=0.5781, over 7050.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6016, over 6003.46 frames. ], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,343 INFO [trainer.py:765] (0/8) Epoch 11, batch 1900, train_loss[loss=3.761, NarTop10Accuracy=0.5645, over 6362.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.598, over 6041.23 frames. ], batch size: 52, lr: 7.70e-03 +2024-08-06 09:10:33,040 INFO [trainer.py:765] (0/8) Epoch 11, batch 2000, train_loss[loss=3.601, NarTop10Accuracy=0.6046, over 5913.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5988, over 6010.07 frames. ], batch size: 49, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (0/8) Epoch 11, batch 2100, train_loss[loss=3.493, NarTop10Accuracy=0.619, over 4923.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6017, over 6000.09 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,708 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-28000.pt +2024-08-06 09:11:24,493 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (0/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (0/8) Epoch 11, batch 2200, train_loss[loss=3.458, NarTop10Accuracy=0.6221, over 7333.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6021, over 6035.96 frames. 
], batch size: 32, lr: 7.66e-03 +2024-08-06 09:11:59,940 INFO [trainer.py:765] (0/8) Epoch 11, batch 2300, train_loss[loss=3.41, NarTop10Accuracy=0.6401, over 5751.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6001, over 6074.01 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,696 INFO [trainer.py:765] (0/8) Epoch 11, batch 2400, train_loss[loss=3.899, NarTop10Accuracy=0.5409, over 6411.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5966, over 5891.73 frames. ], batch size: 50, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (0/8) Epoch 11, batch 2500, train_loss[loss=3.826, NarTop10Accuracy=0.5491, over 5114.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.5997, over 5537.57 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,236 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:13:09,239 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-11.pt +2024-08-06 09:14:12,278 INFO [trainer.py:765] (0/8) Epoch 12, batch 100, train_loss[loss=3.424, NarTop10Accuracy=0.6485, over 7247.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6108, over 2365.90 frames. ], batch size: 31, lr: 7.29e-03 +2024-08-06 09:14:48,096 INFO [trainer.py:765] (0/8) Epoch 12, batch 200, train_loss[loss=3.416, NarTop10Accuracy=0.6393, over 6817.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6165, over 3865.92 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (0/8) Epoch 12, batch 300, train_loss[loss=3.388, NarTop10Accuracy=0.6405, over 7241.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6178, over 4656.48 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (0/8) Epoch 12, batch 400, train_loss[loss=3.352, NarTop10Accuracy=0.6288, over 5085.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6146, over 5122.30 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (0/8) Epoch 12, batch 500, train_loss[loss=3.82, NarTop10Accuracy=0.5551, over 6150.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6121, over 5403.60 frames. ], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (0/8) Epoch 12, batch 600, train_loss[loss=3.467, NarTop10Accuracy=0.6259, over 5804.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6101, over 5673.31 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:01,178 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-29000.pt +2024-08-06 09:17:36,318 INFO [trainer.py:765] (0/8) Epoch 12, batch 700, train_loss[loss=3.659, NarTop10Accuracy=0.582, over 5130.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.61, over 5744.73 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,753 INFO [trainer.py:765] (0/8) Epoch 12, batch 800, train_loss[loss=3.423, NarTop10Accuracy=0.6315, over 5081.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6104, over 5802.17 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (0/8) Epoch 12, batch 900, train_loss[loss=3.847, NarTop10Accuracy=0.5466, over 6669.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6093, over 5834.93 frames. ], batch size: 14, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (0/8) Epoch 12, batch 1000, train_loss[loss=3.26, NarTop10Accuracy=0.6513, over 6312.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6089, over 5931.65 frames. 
], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,427 INFO [trainer.py:765] (0/8) Epoch 12, batch 1100, train_loss[loss=3.762, NarTop10Accuracy=0.5639, over 6854.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6076, over 5964.26 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,443 INFO [trainer.py:765] (0/8) Epoch 12, batch 1200, train_loss[loss=3.51, NarTop10Accuracy=0.6229, over 7286.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6063, over 5951.88 frames. ], batch size: 30, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (0/8) Epoch 12, batch 1300, train_loss[loss=3.67, NarTop10Accuracy=0.569, over 4907.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.605, over 6020.02 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (0/8) Epoch 12, batch 1400, train_loss[loss=3.229, NarTop10Accuracy=0.677, over 6231.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6035, over 6041.58 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,920 INFO [trainer.py:765] (0/8) Epoch 12, batch 1500, train_loss[loss=3.607, NarTop10Accuracy=0.6043, over 5908.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6051, over 5968.91 frames. ], batch size: 49, lr: 7.12e-03 +2024-08-06 09:22:38,027 INFO [trainer.py:765] (0/8) Epoch 12, batch 1600, train_loss[loss=3.786, NarTop10Accuracy=0.5645, over 7100.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6041, over 5944.31 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,859 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-30000.pt +2024-08-06 09:22:43,418 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (0/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,785 INFO [trainer.py:765] (0/8) Epoch 12, batch 1700, train_loss[loss=3.303, NarTop10Accuracy=0.655, over 6302.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6044, over 5943.91 frames. ], batch size: 13, lr: 7.10e-03 +2024-08-06 09:23:41,386 INFO [trainer.py:765] (0/8) Epoch 12, batch 1800, train_loss[loss=3.33, NarTop10Accuracy=0.6538, over 7085.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6069, over 6008.60 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (0/8) Epoch 12, batch 1900, train_loss[loss=3.598, NarTop10Accuracy=0.6011, over 5885.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6033, over 6053.45 frames. ], batch size: 49, lr: 7.08e-03 +2024-08-06 09:24:33,618 INFO [trainer.py:765] (0/8) Epoch 12, batch 2000, train_loss[loss=3.523, NarTop10Accuracy=0.6195, over 5745.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6035, over 6024.57 frames. ], batch size: 49, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (0/8) Epoch 12, batch 2100, train_loss[loss=3.637, NarTop10Accuracy=0.5865, over 4709.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6033, over 5985.60 frames. ], batch size: 5, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (0/8) Epoch 12, batch 2200, train_loss[loss=3.461, NarTop10Accuracy=0.6182, over 7141.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6041, over 6020.57 frames. 
], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (0/8) Epoch 12, batch 2300, train_loss[loss=3.606, NarTop10Accuracy=0.5777, over 5856.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6016, over 6061.03 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (0/8) Epoch 12, batch 2400, train_loss[loss=3.55, NarTop10Accuracy=0.6073, over 6343.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6006, over 5880.95 frames. ], batch size: 49, lr: 7.02e-03 +2024-08-06 09:26:38,154 INFO [trainer.py:765] (0/8) Epoch 12, batch 2500, train_loss[loss=3.535, NarTop10Accuracy=0.5954, over 5159.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6044, over 5528.06 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,637 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:26:59,639 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-12.pt +2024-08-06 09:27:36,183 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-31000.pt +2024-08-06 09:28:03,610 INFO [trainer.py:765] (0/8) Epoch 13, batch 100, train_loss[loss=3.532, NarTop10Accuracy=0.6127, over 7151.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6134, over 2355.84 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (0/8) Epoch 13, batch 200, train_loss[loss=3.352, NarTop10Accuracy=0.6436, over 6918.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6164, over 3857.99 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (0/8) Epoch 13, batch 300, train_loss[loss=3.473, NarTop10Accuracy=0.6271, over 7220.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6167, over 4671.60 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (0/8) Epoch 13, batch 400, train_loss[loss=3.232, NarTop10Accuracy=0.6843, over 5151.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6179, over 5126.29 frames. ], batch size: 7, lr: 6.69e-03 +2024-08-06 09:30:13,729 INFO [trainer.py:765] (0/8) Epoch 13, batch 500, train_loss[loss=3.784, NarTop10Accuracy=0.56, over 6088.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6179, over 5403.82 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (0/8) Epoch 13, batch 600, train_loss[loss=3.664, NarTop10Accuracy=0.5874, over 5846.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6182, over 5670.21 frames. ], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,820 INFO [trainer.py:765] (0/8) Epoch 13, batch 700, train_loss[loss=3.431, NarTop10Accuracy=0.6261, over 5128.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6163, over 5741.58 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (0/8) Epoch 13, batch 800, train_loss[loss=3.359, NarTop10Accuracy=0.6345, over 4989.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6146, over 5793.13 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (0/8) Epoch 13, batch 900, train_loss[loss=3.302, NarTop10Accuracy=0.649, over 6194.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6167, over 5819.52 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (0/8) Epoch 13, batch 1000, train_loss[loss=3.743, NarTop10Accuracy=0.5751, over 6200.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6139, over 5914.57 frames. 
], batch size: 13, lr: 6.63e-03 +2024-08-06 09:33:14,217 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-32000.pt +2024-08-06 09:33:17,889 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (0/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,525 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (0/8) Epoch 13, batch 1100, train_loss[loss=3.632, NarTop10Accuracy=0.5871, over 6833.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6091, over 5951.24 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (0/8) Epoch 13, batch 1200, train_loss[loss=3.627, NarTop10Accuracy=0.5903, over 7226.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6099, over 5960.27 frames. ], batch size: 30, lr: 6.61e-03 +2024-08-06 09:35:05,084 INFO [trainer.py:765] (0/8) Epoch 13, batch 1300, train_loss[loss=3.339, NarTop10Accuracy=0.6303, over 5088.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6109, over 6027.02 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,404 INFO [trainer.py:765] (0/8) Epoch 13, batch 1400, train_loss[loss=3.392, NarTop10Accuracy=0.6494, over 6225.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6097, over 6047.47 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,319 INFO [trainer.py:765] (0/8) Epoch 13, batch 1500, train_loss[loss=3.804, NarTop10Accuracy=0.5581, over 6127.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6098, over 5965.76 frames. ], batch size: 48, lr: 6.58e-03 +2024-08-06 09:36:35,388 INFO [trainer.py:765] (0/8) Epoch 13, batch 1600, train_loss[loss=3.794, NarTop10Accuracy=0.5643, over 6989.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6091, over 5954.69 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (0/8) Epoch 13, batch 1700, train_loss[loss=3.568, NarTop10Accuracy=0.6032, over 6309.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6101, over 5931.50 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (0/8) Epoch 13, batch 1800, train_loss[loss=3.481, NarTop10Accuracy=0.6306, over 6972.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6105, over 5990.53 frames. ], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (0/8) Epoch 13, batch 1900, train_loss[loss=3.555, NarTop10Accuracy=0.6074, over 6292.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6104, over 6037.28 frames. ], batch size: 48, lr: 6.54e-03 +2024-08-06 09:38:21,122 INFO [trainer.py:765] (0/8) Epoch 13, batch 2000, train_loss[loss=3.646, NarTop10Accuracy=0.5893, over 5966.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6106, over 6003.19 frames. ], batch size: 49, lr: 6.53e-03 +2024-08-06 09:38:27,582 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-33000.pt +2024-08-06 09:38:49,691 INFO [trainer.py:765] (0/8) Epoch 13, batch 2100, train_loss[loss=3.109, NarTop10Accuracy=0.6985, over 3971.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6121, over 5977.62 frames. 
], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (0/8) Epoch 13, batch 2200, train_loss[loss=3.507, NarTop10Accuracy=0.6141, over 7361.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6097, over 6031.36 frames. ], batch size: 31, lr: 6.51e-03 +2024-08-06 09:39:40,617 INFO [trainer.py:765] (0/8) Epoch 13, batch 2300, train_loss[loss=3.516, NarTop10Accuracy=0.6162, over 5709.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6083, over 6056.74 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (0/8) Epoch 13, batch 2400, train_loss[loss=3.733, NarTop10Accuracy=0.5769, over 6415.00 frames. ], tot_loss[loss=3.553, NarTop10Accuracy=0.6064, over 5858.37 frames. ], batch size: 49, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (0/8) Epoch 13, batch 2500, train_loss[loss=3.395, NarTop10Accuracy=0.6333, over 5067.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6111, over 5530.29 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:49,792 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:40:49,794 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-13.pt +2024-08-06 09:41:48,979 INFO [trainer.py:765] (0/8) Epoch 14, batch 100, train_loss[loss=3.407, NarTop10Accuracy=0.6368, over 7189.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6234, over 2374.56 frames. ], batch size: 30, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (0/8) Epoch 14, batch 200, train_loss[loss=3.355, NarTop10Accuracy=0.6338, over 6898.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6252, over 3869.57 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (0/8) Epoch 14, batch 300, train_loss[loss=3.553, NarTop10Accuracy=0.5984, over 7180.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6211, over 4678.93 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (0/8) Epoch 14, batch 400, train_loss[loss=3.366, NarTop10Accuracy=0.6524, over 5231.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6211, over 5132.60 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,485 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-34000.pt +2024-08-06 09:43:46,094 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (0/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,652 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:43:54,211 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (0/8) Epoch 14, batch 500, train_loss[loss=3.68, NarTop10Accuracy=0.5938, over 6072.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6213, over 5406.98 frames. ], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (0/8) Epoch 14, batch 600, train_loss[loss=3.892, NarTop10Accuracy=0.5449, over 5947.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6211, over 5686.73 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,804 INFO [trainer.py:765] (0/8) Epoch 14, batch 700, train_loss[loss=3.498, NarTop10Accuracy=0.5997, over 5182.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6226, over 5744.37 frames. 
], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (0/8) Epoch 14, batch 800, train_loss[loss=3.444, NarTop10Accuracy=0.6424, over 5044.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.62, over 5784.84 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (0/8) Epoch 14, batch 900, train_loss[loss=3.55, NarTop10Accuracy=0.6, over 6431.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6191, over 5819.81 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (0/8) Epoch 14, batch 1000, train_loss[loss=3.646, NarTop10Accuracy=0.5753, over 6331.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6181, over 5920.45 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (0/8) Epoch 14, batch 1100, train_loss[loss=3.444, NarTop10Accuracy=0.6291, over 6765.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6167, over 5960.46 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (0/8) Epoch 14, batch 1200, train_loss[loss=3.414, NarTop10Accuracy=0.6362, over 7068.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6174, over 5963.53 frames. ], batch size: 30, lr: 6.14e-03 +2024-08-06 09:48:57,972 INFO [trainer.py:765] (0/8) Epoch 14, batch 1300, train_loss[loss=3.465, NarTop10Accuracy=0.6307, over 5115.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6166, over 6021.94 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (0/8) Epoch 14, batch 1400, train_loss[loss=3.328, NarTop10Accuracy=0.6395, over 6029.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.613, over 6034.30 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:49:48,789 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-35000.pt +2024-08-06 09:50:07,531 INFO [trainer.py:765] (0/8) Epoch 14, batch 1500, train_loss[loss=3.598, NarTop10Accuracy=0.599, over 6214.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6131, over 5986.71 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (0/8) Epoch 14, batch 1600, train_loss[loss=3.416, NarTop10Accuracy=0.6316, over 7212.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6149, over 5960.23 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,377 INFO [trainer.py:765] (0/8) Epoch 14, batch 1700, train_loss[loss=3.451, NarTop10Accuracy=0.6321, over 6309.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6138, over 5936.01 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (0/8) Epoch 14, batch 1800, train_loss[loss=3.549, NarTop10Accuracy=0.6126, over 7063.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6161, over 5991.62 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (0/8) Epoch 14, batch 1900, train_loss[loss=3.946, NarTop10Accuracy=0.5258, over 6080.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.6135, over 6031.51 frames. ], batch size: 50, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (0/8) Epoch 14, batch 2000, train_loss[loss=3.46, NarTop10Accuracy=0.6221, over 6257.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6108, over 6017.36 frames. ], batch size: 49, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (0/8) Epoch 14, batch 2100, train_loss[loss=3.792, NarTop10Accuracy=0.5572, over 4802.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.6135, over 6000.70 frames. 
], batch size: 5, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (0/8) Epoch 14, batch 2200, train_loss[loss=3.351, NarTop10Accuracy=0.6504, over 7229.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6127, over 6033.36 frames. ], batch size: 30, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (0/8) Epoch 14, batch 2300, train_loss[loss=3.421, NarTop10Accuracy=0.636, over 5805.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6087, over 6069.19 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (0/8) Epoch 14, batch 2400, train_loss[loss=3.645, NarTop10Accuracy=0.5886, over 5615.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6094, over 5882.68 frames. ], batch size: 48, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-36000.pt +2024-08-06 09:54:17,718 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (0/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (0/8) Epoch 14, batch 2500, train_loss[loss=3.98, NarTop10Accuracy=0.5167, over 4996.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6132, over 5547.93 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,850 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 09:54:58,855 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-14.pt +2024-08-06 09:56:03,097 INFO [trainer.py:765] (0/8) Epoch 15, batch 100, train_loss[loss=3.529, NarTop10Accuracy=0.6146, over 7427.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.626, over 2376.08 frames. ], batch size: 31, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (0/8) Epoch 15, batch 200, train_loss[loss=3.428, NarTop10Accuracy=0.6274, over 6801.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6266, over 3857.21 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,653 INFO [trainer.py:765] (0/8) Epoch 15, batch 300, train_loss[loss=3.309, NarTop10Accuracy=0.6577, over 7082.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6281, over 4673.92 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,463 INFO [trainer.py:765] (0/8) Epoch 15, batch 400, train_loss[loss=3.487, NarTop10Accuracy=0.627, over 5033.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6262, over 5111.59 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (0/8) Epoch 15, batch 500, train_loss[loss=3.379, NarTop10Accuracy=0.6392, over 6220.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6211, over 5383.14 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (0/8) Epoch 15, batch 600, train_loss[loss=3.812, NarTop10Accuracy=0.557, over 5637.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6211, over 5665.34 frames. ], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,062 INFO [trainer.py:765] (0/8) Epoch 15, batch 700, train_loss[loss=3.323, NarTop10Accuracy=0.6453, over 5091.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6211, over 5725.67 frames. 
], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (0/8) Epoch 15, batch 800, train_loss[loss=3.748, NarTop10Accuracy=0.5799, over 4994.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6214, over 5807.32 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:15,459 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-37000.pt +2024-08-06 10:00:32,024 INFO [trainer.py:765] (0/8) Epoch 15, batch 900, train_loss[loss=3.362, NarTop10Accuracy=0.6503, over 6519.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6225, over 5835.53 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 10:01:05,538 INFO [trainer.py:765] (0/8) Epoch 15, batch 1000, train_loss[loss=3.266, NarTop10Accuracy=0.6784, over 6267.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.622, over 5934.89 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (0/8) Epoch 15, batch 1100, train_loss[loss=3.423, NarTop10Accuracy=0.6252, over 6800.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6179, over 5963.03 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,756 INFO [trainer.py:765] (0/8) Epoch 15, batch 1200, train_loss[loss=3.7, NarTop10Accuracy=0.5779, over 7017.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6207, over 5945.27 frames. ], batch size: 30, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (0/8) Epoch 15, batch 1300, train_loss[loss=3.459, NarTop10Accuracy=0.6122, over 4240.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6207, over 6019.87 frames. ], batch size: 5, lr: 5.72e-03 +2024-08-06 10:03:25,436 INFO [trainer.py:765] (0/8) Epoch 15, batch 1400, train_loss[loss=3.576, NarTop10Accuracy=0.5972, over 6134.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6181, over 6037.85 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,042 INFO [trainer.py:765] (0/8) Epoch 15, batch 1500, train_loss[loss=3.553, NarTop10Accuracy=0.6034, over 6214.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.619, over 5966.51 frames. ], batch size: 48, lr: 5.71e-03 +2024-08-06 10:04:27,106 INFO [trainer.py:765] (0/8) Epoch 15, batch 1600, train_loss[loss=3.741, NarTop10Accuracy=0.5745, over 7154.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6212, over 5952.21 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (0/8) Epoch 15, batch 1700, train_loss[loss=3.691, NarTop10Accuracy=0.5815, over 6239.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6204, over 5942.69 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,728 INFO [trainer.py:765] (0/8) Epoch 15, batch 1800, train_loss[loss=3.692, NarTop10Accuracy=0.5816, over 7202.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6163, over 6005.64 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,265 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-38000.pt +2024-08-06 10:05:40,780 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (0/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:05:47,920 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,568 INFO [trainer.py:765] (0/8) Epoch 15, batch 1900, train_loss[loss=3.715, NarTop10Accuracy=0.5751, over 6884.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6146, over 6047.41 frames. ], batch size: 49, lr: 5.68e-03 +2024-08-06 10:06:23,372 INFO [trainer.py:765] (0/8) Epoch 15, batch 2000, train_loss[loss=3.55, NarTop10Accuracy=0.604, over 6537.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6157, over 6018.00 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,759 INFO [trainer.py:765] (0/8) Epoch 15, batch 2100, train_loss[loss=3.072, NarTop10Accuracy=0.6891, over 3840.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6166, over 5990.47 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,170 INFO [trainer.py:765] (0/8) Epoch 15, batch 2200, train_loss[loss=3.347, NarTop10Accuracy=0.6483, over 7171.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6182, over 6033.65 frames. ], batch size: 30, lr: 5.65e-03 +2024-08-06 10:07:39,629 INFO [trainer.py:765] (0/8) Epoch 15, batch 2300, train_loss[loss=3.223, NarTop10Accuracy=0.665, over 5819.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.616, over 6054.65 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (0/8) Epoch 15, batch 2400, train_loss[loss=3.893, NarTop10Accuracy=0.529, over 6052.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6148, over 5874.77 frames. ], batch size: 49, lr: 5.64e-03 +2024-08-06 10:08:27,713 INFO [trainer.py:765] (0/8) Epoch 15, batch 2500, train_loss[loss=3.534, NarTop10Accuracy=0.6112, over 4969.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6184, over 5530.44 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,186 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:08:49,192 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-15.pt +2024-08-06 10:09:44,183 INFO [trainer.py:765] (0/8) Epoch 16, batch 100, train_loss[loss=3.668, NarTop10Accuracy=0.5889, over 7489.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6261, over 2375.49 frames. ], batch size: 31, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (0/8) Epoch 16, batch 200, train_loss[loss=3.358, NarTop10Accuracy=0.6581, over 6865.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6305, over 3882.41 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:50,262 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-39000.pt +2024-08-06 10:10:58,841 INFO [trainer.py:765] (0/8) Epoch 16, batch 300, train_loss[loss=3.245, NarTop10Accuracy=0.6709, over 7048.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6287, over 4696.14 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,595 INFO [trainer.py:765] (0/8) Epoch 16, batch 400, train_loss[loss=3.4, NarTop10Accuracy=0.6404, over 5705.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6284, over 5133.20 frames. ], batch size: 8, lr: 5.42e-03 +2024-08-06 10:12:02,298 INFO [trainer.py:765] (0/8) Epoch 16, batch 500, train_loss[loss=3.76, NarTop10Accuracy=0.5652, over 6117.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6291, over 5431.10 frames. 
], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (0/8) Epoch 16, batch 600, train_loss[loss=3.571, NarTop10Accuracy=0.6137, over 5864.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.63, over 5698.16 frames. ], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,952 INFO [trainer.py:765] (0/8) Epoch 16, batch 700, train_loss[loss=3.169, NarTop10Accuracy=0.6738, over 4974.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6279, over 5778.31 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,285 INFO [trainer.py:765] (0/8) Epoch 16, batch 800, train_loss[loss=3.663, NarTop10Accuracy=0.5856, over 4967.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6286, over 5816.67 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,296 INFO [trainer.py:765] (0/8) Epoch 16, batch 900, train_loss[loss=3.329, NarTop10Accuracy=0.6512, over 6170.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6286, over 5833.40 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,059 INFO [trainer.py:765] (0/8) Epoch 16, batch 1000, train_loss[loss=3.688, NarTop10Accuracy=0.5727, over 6362.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6239, over 5947.70 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,509 INFO [trainer.py:765] (0/8) Epoch 16, batch 1100, train_loss[loss=3.456, NarTop10Accuracy=0.6299, over 6922.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6212, over 5977.35 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,384 INFO [trainer.py:765] (0/8) Epoch 16, batch 1200, train_loss[loss=3.496, NarTop10Accuracy=0.6262, over 7283.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6221, over 5962.36 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-40000.pt +2024-08-06 10:16:42,816 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (0/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,042 INFO [trainer.py:765] (0/8) Epoch 16, batch 1300, train_loss[loss=3.847, NarTop10Accuracy=0.5535, over 4978.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6233, over 6018.96 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,376 INFO [trainer.py:765] (0/8) Epoch 16, batch 1400, train_loss[loss=3.443, NarTop10Accuracy=0.6378, over 6184.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6223, over 6028.72 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,354 INFO [trainer.py:765] (0/8) Epoch 16, batch 1500, train_loss[loss=3.562, NarTop10Accuracy=0.6162, over 6475.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.622, over 5961.91 frames. ], batch size: 48, lr: 5.35e-03 +2024-08-06 10:18:30,469 INFO [trainer.py:765] (0/8) Epoch 16, batch 1600, train_loss[loss=3.638, NarTop10Accuracy=0.5969, over 7153.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6194, over 5946.03 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,273 INFO [trainer.py:765] (0/8) Epoch 16, batch 1700, train_loss[loss=3.756, NarTop10Accuracy=0.5646, over 6343.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6219, over 5940.11 frames. 
], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,979 INFO [trainer.py:765] (0/8) Epoch 16, batch 1800, train_loss[loss=3.677, NarTop10Accuracy=0.5763, over 7114.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6192, over 5995.30 frames. ], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,773 INFO [trainer.py:765] (0/8) Epoch 16, batch 1900, train_loss[loss=3.752, NarTop10Accuracy=0.5707, over 5887.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6191, over 6022.56 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:16,602 INFO [trainer.py:765] (0/8) Epoch 16, batch 2000, train_loss[loss=3.525, NarTop10Accuracy=0.6033, over 6009.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6166, over 6022.59 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (0/8) Epoch 16, batch 2100, train_loss[loss=3.462, NarTop10Accuracy=0.6013, over 3890.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6139, over 6007.81 frames. ], batch size: 4, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (0/8) Epoch 16, batch 2200, train_loss[loss=3.365, NarTop10Accuracy=0.6478, over 7039.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6166, over 6047.01 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:28,124 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-41000.pt +2024-08-06 10:21:36,082 INFO [trainer.py:765] (0/8) Epoch 16, batch 2300, train_loss[loss=3.444, NarTop10Accuracy=0.624, over 5730.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6153, over 6064.13 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,907 INFO [trainer.py:765] (0/8) Epoch 16, batch 2400, train_loss[loss=3.551, NarTop10Accuracy=0.6158, over 6305.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6163, over 5898.09 frames. ], batch size: 48, lr: 5.29e-03 +2024-08-06 10:22:24,290 INFO [trainer.py:765] (0/8) Epoch 16, batch 2500, train_loss[loss=3.423, NarTop10Accuracy=0.6276, over 5252.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6206, over 5533.26 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,882 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:22:45,887 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-16.pt +2024-08-06 10:23:45,727 INFO [trainer.py:765] (0/8) Epoch 17, batch 100, train_loss[loss=3.439, NarTop10Accuracy=0.6323, over 7240.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6326, over 2373.95 frames. ], batch size: 30, lr: 5.12e-03 +2024-08-06 10:24:19,034 INFO [trainer.py:765] (0/8) Epoch 17, batch 200, train_loss[loss=3.294, NarTop10Accuracy=0.6687, over 6910.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6325, over 3864.63 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,441 INFO [trainer.py:765] (0/8) Epoch 17, batch 300, train_loss[loss=3.667, NarTop10Accuracy=0.5923, over 7201.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.632, over 4686.93 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (0/8) Epoch 17, batch 400, train_loss[loss=3.697, NarTop10Accuracy=0.5677, over 5258.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6316, over 5135.09 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (0/8) Epoch 17, batch 500, train_loss[loss=3.525, NarTop10Accuracy=0.6251, over 6124.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6311, over 5407.05 frames. 
], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,756 INFO [trainer.py:765] (0/8) Epoch 17, batch 600, train_loss[loss=3.845, NarTop10Accuracy=0.5448, over 5773.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6319, over 5680.41 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,499 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-42000.pt +2024-08-06 10:27:11,062 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (0/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. +2024-08-06 10:27:17,548 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,072 INFO [trainer.py:765] (0/8) Epoch 17, batch 700, train_loss[loss=2.996, NarTop10Accuracy=0.724, over 5076.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6272, over 5733.93 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (0/8) Epoch 17, batch 800, train_loss[loss=3.461, NarTop10Accuracy=0.6324, over 5069.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6282, over 5790.92 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,839 INFO [trainer.py:765] (0/8) Epoch 17, batch 900, train_loss[loss=3.388, NarTop10Accuracy=0.6421, over 6229.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.628, over 5806.67 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,684 INFO [trainer.py:765] (0/8) Epoch 17, batch 1000, train_loss[loss=3.279, NarTop10Accuracy=0.6637, over 6228.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6294, over 5916.68 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (0/8) Epoch 17, batch 1100, train_loss[loss=3.225, NarTop10Accuracy=0.6719, over 6796.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.627, over 5942.04 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,242 INFO [trainer.py:765] (0/8) Epoch 17, batch 1200, train_loss[loss=3.618, NarTop10Accuracy=0.6015, over 7032.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6289, over 5962.02 frames. ], batch size: 30, lr: 5.05e-03 +2024-08-06 10:30:47,102 INFO [trainer.py:765] (0/8) Epoch 17, batch 1300, train_loss[loss=3.494, NarTop10Accuracy=0.62, over 5159.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6271, over 6030.65 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,894 INFO [trainer.py:765] (0/8) Epoch 17, batch 1400, train_loss[loss=3.443, NarTop10Accuracy=0.6382, over 6286.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6256, over 6042.74 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (0/8) Epoch 17, batch 1500, train_loss[loss=3.549, NarTop10Accuracy=0.6116, over 6618.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6271, over 5998.72 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,401 INFO [trainer.py:765] (0/8) Epoch 17, batch 1600, train_loss[loss=3.577, NarTop10Accuracy=0.6047, over 7263.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6251, over 5979.40 frames. 
], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:45,660 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-43000.pt +2024-08-06 10:32:50,394 INFO [trainer.py:765] (0/8) Epoch 17, batch 1700, train_loss[loss=3.65, NarTop10Accuracy=0.5822, over 6639.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.621, over 5970.67 frames. ], batch size: 14, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (0/8) Epoch 17, batch 1800, train_loss[loss=3.757, NarTop10Accuracy=0.5539, over 7084.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6191, over 6015.86 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,597 INFO [trainer.py:765] (0/8) Epoch 17, batch 1900, train_loss[loss=3.811, NarTop10Accuracy=0.542, over 5954.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6208, over 6033.53 frames. ], batch size: 48, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (0/8) Epoch 17, batch 2000, train_loss[loss=3.835, NarTop10Accuracy=0.5499, over 5625.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6204, over 6015.20 frames. ], batch size: 49, lr: 5.00e-03 +2024-08-06 10:34:34,802 INFO [trainer.py:765] (0/8) Epoch 17, batch 2100, train_loss[loss=3.389, NarTop10Accuracy=0.6289, over 3953.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6184, over 6005.50 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (0/8) Epoch 17, batch 2200, train_loss[loss=3.462, NarTop10Accuracy=0.626, over 7251.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6225, over 6043.42 frames. ], batch size: 31, lr: 4.99e-03 +2024-08-06 10:35:25,732 INFO [trainer.py:765] (0/8) Epoch 17, batch 2300, train_loss[loss=3.174, NarTop10Accuracy=0.6803, over 5822.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6208, over 6073.63 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (0/8) Epoch 17, batch 2400, train_loss[loss=3.817, NarTop10Accuracy=0.5418, over 6006.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6182, over 5893.09 frames. ], batch size: 48, lr: 4.98e-03 +2024-08-06 10:36:14,105 INFO [trainer.py:765] (0/8) Epoch 17, batch 2500, train_loss[loss=3.472, NarTop10Accuracy=0.6091, over 5018.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6224, over 5563.06 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,891 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:36:35,894 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-17.pt +2024-08-06 10:37:32,052 INFO [trainer.py:765] (0/8) Epoch 18, batch 100, train_loss[loss=3.262, NarTop10Accuracy=0.6699, over 7516.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6318, over 2383.57 frames. ], batch size: 30, lr: 4.83e-03 +2024-08-06 10:37:39,162 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-44000.pt +2024-08-06 10:37:42,569 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,085 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:37:49,685 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,145 INFO [trainer.py:765] (0/8) Epoch 18, batch 200, train_loss[loss=3.517, NarTop10Accuracy=0.61, over 6723.00 frames. 
], tot_loss[loss=3.427, NarTop10Accuracy=0.633, over 3879.98 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,199 INFO [trainer.py:765] (0/8) Epoch 18, batch 300, train_loss[loss=3.41, NarTop10Accuracy=0.6343, over 7139.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6351, over 4677.63 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (0/8) Epoch 18, batch 400, train_loss[loss=3.476, NarTop10Accuracy=0.6295, over 5119.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6357, over 5127.61 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (0/8) Epoch 18, batch 500, train_loss[loss=3.267, NarTop10Accuracy=0.6854, over 6211.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6364, over 5412.72 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,527 INFO [trainer.py:765] (0/8) Epoch 18, batch 600, train_loss[loss=3.565, NarTop10Accuracy=0.6119, over 5777.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6355, over 5671.23 frames. ], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (0/8) Epoch 18, batch 700, train_loss[loss=3.165, NarTop10Accuracy=0.6882, over 4977.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6322, over 5744.77 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (0/8) Epoch 18, batch 800, train_loss[loss=3.381, NarTop10Accuracy=0.656, over 5088.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6326, over 5809.16 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (0/8) Epoch 18, batch 900, train_loss[loss=3.583, NarTop10Accuracy=0.6133, over 6139.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6308, over 5827.63 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,703 INFO [trainer.py:765] (0/8) Epoch 18, batch 1000, train_loss[loss=3.04, NarTop10Accuracy=0.7079, over 6275.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6312, over 5917.41 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (0/8) Epoch 18, batch 1100, train_loss[loss=3.832, NarTop10Accuracy=0.5548, over 6851.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6246, over 5960.72 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:43:28,871 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-45000.pt +2024-08-06 10:44:02,363 INFO [trainer.py:765] (0/8) Epoch 18, batch 1200, train_loss[loss=3.447, NarTop10Accuracy=0.616, over 7430.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6273, over 5953.65 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (0/8) Epoch 18, batch 1300, train_loss[loss=3.222, NarTop10Accuracy=0.6733, over 5074.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6285, over 6034.54 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,238 INFO [trainer.py:765] (0/8) Epoch 18, batch 1400, train_loss[loss=3.506, NarTop10Accuracy=0.6238, over 6010.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6258, over 6052.73 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,976 INFO [trainer.py:765] (0/8) Epoch 18, batch 1500, train_loss[loss=3.797, NarTop10Accuracy=0.5625, over 6089.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6242, over 5980.65 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (0/8) Epoch 18, batch 1600, train_loss[loss=3.339, NarTop10Accuracy=0.652, over 7037.00 frames. 
], tot_loss[loss=3.472, NarTop10Accuracy=0.6226, over 5970.39 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,858 INFO [trainer.py:765] (0/8) Epoch 18, batch 1700, train_loss[loss=3.581, NarTop10Accuracy=0.5917, over 6235.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6246, over 5951.66 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (0/8) Epoch 18, batch 1800, train_loss[loss=3.524, NarTop10Accuracy=0.6118, over 7181.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6225, over 6020.04 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (0/8) Epoch 18, batch 1900, train_loss[loss=3.679, NarTop10Accuracy=0.5866, over 6421.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6217, over 6059.00 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (0/8) Epoch 18, batch 2000, train_loss[loss=3.499, NarTop10Accuracy=0.6279, over 6282.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6211, over 6024.30 frames. ], batch size: 51, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (0/8) Epoch 18, batch 2100, train_loss[loss=3.435, NarTop10Accuracy=0.6313, over 3984.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6217, over 6006.34 frames. ], batch size: 4, lr: 4.72e-03 +2024-08-06 10:48:24,747 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-46000.pt +2024-08-06 10:48:28,374 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (0/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:48:35,535 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,096 INFO [trainer.py:765] (0/8) Epoch 18, batch 2200, train_loss[loss=3.309, NarTop10Accuracy=0.6538, over 7137.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.624, over 6040.26 frames. ], batch size: 30, lr: 4.72e-03 +2024-08-06 10:49:21,521 INFO [trainer.py:765] (0/8) Epoch 18, batch 2300, train_loss[loss=3.298, NarTop10Accuracy=0.6608, over 5732.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6231, over 6082.50 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (0/8) Epoch 18, batch 2400, train_loss[loss=3.499, NarTop10Accuracy=0.62, over 6395.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6237, over 5899.71 frames. ], batch size: 50, lr: 4.71e-03 +2024-08-06 10:50:09,708 INFO [trainer.py:765] (0/8) Epoch 18, batch 2500, train_loss[loss=3.239, NarTop10Accuracy=0.6769, over 5079.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6267, over 5532.26 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:31,270 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 10:50:31,273 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-18.pt +2024-08-06 10:51:33,564 INFO [trainer.py:765] (0/8) Epoch 19, batch 100, train_loss[loss=3.38, NarTop10Accuracy=0.6538, over 7105.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6395, over 2361.09 frames. ], batch size: 30, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (0/8) Epoch 19, batch 200, train_loss[loss=3.632, NarTop10Accuracy=0.5859, over 7006.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6389, over 3860.72 frames. 
], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (0/8) Epoch 19, batch 300, train_loss[loss=3.55, NarTop10Accuracy=0.6134, over 7128.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6389, over 4670.97 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,830 INFO [trainer.py:765] (0/8) Epoch 19, batch 400, train_loss[loss=3.177, NarTop10Accuracy=0.6894, over 5026.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6364, over 5131.23 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (0/8) Epoch 19, batch 500, train_loss[loss=3.365, NarTop10Accuracy=0.648, over 6234.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.637, over 5398.92 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:53:55,100 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-47000.pt +2024-08-06 10:54:18,600 INFO [trainer.py:765] (0/8) Epoch 19, batch 600, train_loss[loss=3.235, NarTop10Accuracy=0.6758, over 5866.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6368, over 5668.87 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (0/8) Epoch 19, batch 700, train_loss[loss=3.407, NarTop10Accuracy=0.6418, over 5124.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.634, over 5745.07 frames. ], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (0/8) Epoch 19, batch 800, train_loss[loss=3.293, NarTop10Accuracy=0.6604, over 5143.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.634, over 5787.54 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,239 INFO [trainer.py:765] (0/8) Epoch 19, batch 900, train_loss[loss=3.489, NarTop10Accuracy=0.619, over 6404.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6339, over 5801.49 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (0/8) Epoch 19, batch 1000, train_loss[loss=3.282, NarTop10Accuracy=0.6481, over 6326.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6314, over 5915.69 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,188 INFO [trainer.py:765] (0/8) Epoch 19, batch 1100, train_loss[loss=3.363, NarTop10Accuracy=0.6531, over 6766.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6302, over 5941.02 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (0/8) Epoch 19, batch 1200, train_loss[loss=3.272, NarTop10Accuracy=0.664, over 7487.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6306, over 5938.22 frames. ], batch size: 31, lr: 4.51e-03 +2024-08-06 10:58:23,900 INFO [trainer.py:765] (0/8) Epoch 19, batch 1300, train_loss[loss=3.382, NarTop10Accuracy=0.6376, over 5086.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6292, over 6006.31 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (0/8) Epoch 19, batch 1400, train_loss[loss=3.356, NarTop10Accuracy=0.6379, over 6107.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.628, over 6025.44 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,769 INFO [trainer.py:765] (0/8) Epoch 19, batch 1500, train_loss[loss=3.729, NarTop10Accuracy=0.5803, over 5679.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6276, over 5970.66 frames. 
], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,830 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-48000.pt +2024-08-06 10:59:44,391 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (0/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (0/8) Epoch 19, batch 1600, train_loss[loss=3.712, NarTop10Accuracy=0.5748, over 7197.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6276, over 5954.39 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (0/8) Epoch 19, batch 1700, train_loss[loss=3.552, NarTop10Accuracy=0.5953, over 6299.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6258, over 5942.80 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,257 INFO [trainer.py:765] (0/8) Epoch 19, batch 1800, train_loss[loss=3.259, NarTop10Accuracy=0.6654, over 6922.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6281, over 6013.37 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (0/8) Epoch 19, batch 1900, train_loss[loss=3.616, NarTop10Accuracy=0.6008, over 5937.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6258, over 6048.61 frames. ], batch size: 49, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (0/8) Epoch 19, batch 2000, train_loss[loss=3.422, NarTop10Accuracy=0.6322, over 5901.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6265, over 6016.63 frames. ], batch size: 49, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (0/8) Epoch 19, batch 2100, train_loss[loss=3.031, NarTop10Accuracy=0.6767, over 4762.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6255, over 6004.56 frames. ], batch size: 5, lr: 4.47e-03 +2024-08-06 11:02:45,694 INFO [trainer.py:765] (0/8) Epoch 19, batch 2200, train_loss[loss=3.52, NarTop10Accuracy=0.6153, over 7486.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6245, over 6034.98 frames. ], batch size: 33, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (0/8) Epoch 19, batch 2300, train_loss[loss=3.363, NarTop10Accuracy=0.6499, over 5772.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6246, over 6071.89 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,950 INFO [trainer.py:765] (0/8) Epoch 19, batch 2400, train_loss[loss=3.61, NarTop10Accuracy=0.5917, over 6172.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6219, over 5912.72 frames. ], batch size: 49, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (0/8) Epoch 19, batch 2500, train_loss[loss=3.427, NarTop10Accuracy=0.6271, over 5131.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6286, over 5559.27 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:07,805 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-49000.pt +2024-08-06 11:04:24,321 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:04:24,323 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-19.pt +2024-08-06 11:05:26,561 INFO [trainer.py:765] (0/8) Epoch 20, batch 100, train_loss[loss=3.57, NarTop10Accuracy=0.6046, over 7170.00 frames. 
], tot_loss[loss=3.395, NarTop10Accuracy=0.6392, over 2384.52 frames. ], batch size: 30, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (0/8) Epoch 20, batch 200, train_loss[loss=3.4, NarTop10Accuracy=0.6418, over 6913.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 3878.91 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (0/8) Epoch 20, batch 300, train_loss[loss=3.446, NarTop10Accuracy=0.6291, over 7291.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6389, over 4677.75 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (0/8) Epoch 20, batch 400, train_loss[loss=3.313, NarTop10Accuracy=0.6551, over 5135.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6402, over 5144.31 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (0/8) Epoch 20, batch 500, train_loss[loss=3.138, NarTop10Accuracy=0.6831, over 6235.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6399, over 5421.23 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (0/8) Epoch 20, batch 600, train_loss[loss=3.144, NarTop10Accuracy=0.6769, over 5857.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6407, over 5691.84 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,274 INFO [trainer.py:765] (0/8) Epoch 20, batch 700, train_loss[loss=3.271, NarTop10Accuracy=0.6762, over 4949.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6374, over 5750.22 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (0/8) Epoch 20, batch 800, train_loss[loss=3.192, NarTop10Accuracy=0.6872, over 4293.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6341, over 5789.81 frames. ], batch size: 5, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (0/8) Epoch 20, batch 900, train_loss[loss=3.239, NarTop10Accuracy=0.6641, over 6221.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6332, over 5821.22 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,198 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-50000.pt +2024-08-06 11:10:16,238 WARNING [checkpoint.py:343] (0/8) No checkpoints found in exp/valle +2024-08-06 11:10:16,239 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (0/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30501MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,965 INFO [trainer.py:765] (0/8) Epoch 20, batch 1000, train_loss[loss=3.176, NarTop10Accuracy=0.6844, over 6667.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6326, over 5917.17 frames. ], batch size: 14, lr: 4.29e-03 +2024-08-06 11:11:21,022 INFO [trainer.py:765] (0/8) Epoch 20, batch 1100, train_loss[loss=3.239, NarTop10Accuracy=0.667, over 6895.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6316, over 5958.43 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,393 INFO [trainer.py:765] (0/8) Epoch 20, batch 1200, train_loss[loss=3.396, NarTop10Accuracy=0.6304, over 7420.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6315, over 5956.42 frames. 
], batch size: 31, lr: 4.28e-03 +2024-08-06 11:12:30,751 INFO [trainer.py:765] (0/8) Epoch 20, batch 1300, train_loss[loss=3.48, NarTop10Accuracy=0.6197, over 5126.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6341, over 6021.47 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,291 INFO [trainer.py:765] (0/8) Epoch 20, batch 1400, train_loss[loss=3.664, NarTop10Accuracy=0.5908, over 6055.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6299, over 6050.41 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,989 INFO [trainer.py:765] (0/8) Epoch 20, batch 1500, train_loss[loss=3.552, NarTop10Accuracy=0.6029, over 6106.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6288, over 5986.45 frames. ], batch size: 49, lr: 4.27e-03 +2024-08-06 11:14:07,051 INFO [trainer.py:765] (0/8) Epoch 20, batch 1600, train_loss[loss=3.399, NarTop10Accuracy=0.6412, over 7181.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6291, over 5960.06 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,910 INFO [trainer.py:765] (0/8) Epoch 20, batch 1700, train_loss[loss=3.677, NarTop10Accuracy=0.5825, over 6613.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6298, over 5941.84 frames. ], batch size: 14, lr: 4.26e-03 +2024-08-06 11:15:00,590 INFO [trainer.py:765] (0/8) Epoch 20, batch 1800, train_loss[loss=3.347, NarTop10Accuracy=0.6523, over 7109.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6284, over 6014.00 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,276 INFO [trainer.py:765] (0/8) Epoch 20, batch 1900, train_loss[loss=3.641, NarTop10Accuracy=0.5901, over 6478.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6265, over 6044.80 frames. ], batch size: 51, lr: 4.26e-03 +2024-08-06 11:15:41,045 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-51000.pt +2024-08-06 11:15:56,438 INFO [trainer.py:765] (0/8) Epoch 20, batch 2000, train_loss[loss=3.533, NarTop10Accuracy=0.6177, over 5881.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6282, over 6018.35 frames. ], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,957 INFO [trainer.py:765] (0/8) Epoch 20, batch 2100, train_loss[loss=3.102, NarTop10Accuracy=0.7055, over 3844.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.626, over 5997.16 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 11:16:47,405 INFO [trainer.py:765] (0/8) Epoch 20, batch 2200, train_loss[loss=3.477, NarTop10Accuracy=0.6204, over 7070.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6242, over 6046.12 frames. ], batch size: 30, lr: 4.24e-03 +2024-08-06 11:17:12,907 INFO [trainer.py:765] (0/8) Epoch 20, batch 2300, train_loss[loss=3.574, NarTop10Accuracy=0.5969, over 5783.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6226, over 6063.83 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,714 INFO [trainer.py:765] (0/8) Epoch 20, batch 2400, train_loss[loss=3.315, NarTop10Accuracy=0.6505, over 5130.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6235, over 5881.42 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,247 INFO [trainer.py:765] (0/8) Epoch 20, batch 2500, train_loss[loss=3.564, NarTop10Accuracy=0.6051, over 5028.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6302, over 5543.41 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,262 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
+2024-08-06 11:18:22,264 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-20.pt +2024-08-06 11:19:21,459 INFO [trainer.py:765] (0/8) Epoch 21, batch 100, train_loss[loss=3.248, NarTop10Accuracy=0.6621, over 7339.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.64, over 2379.39 frames. ], batch size: 32, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (0/8) Epoch 21, batch 200, train_loss[loss=3.419, NarTop10Accuracy=0.6452, over 7004.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6395, over 3868.69 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,598 INFO [trainer.py:765] (0/8) Epoch 21, batch 300, train_loss[loss=3.643, NarTop10Accuracy=0.5917, over 7038.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6423, over 4682.70 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-52000.pt +2024-08-06 11:20:57,870 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (0/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,221 INFO [trainer.py:765] (0/8) Epoch 21, batch 400, train_loss[loss=3.504, NarTop10Accuracy=0.609, over 5166.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6402, over 5128.82 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (0/8) Epoch 21, batch 500, train_loss[loss=3.356, NarTop10Accuracy=0.6553, over 6151.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6412, over 5401.06 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,238 INFO [trainer.py:765] (0/8) Epoch 21, batch 600, train_loss[loss=3.737, NarTop10Accuracy=0.5716, over 5728.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6391, over 5670.42 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,843 INFO [trainer.py:765] (0/8) Epoch 21, batch 700, train_loss[loss=3.07, NarTop10Accuracy=0.7053, over 5079.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6368, over 5750.59 frames. ], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,075 INFO [trainer.py:765] (0/8) Epoch 21, batch 800, train_loss[loss=3.328, NarTop10Accuracy=0.641, over 5085.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6347, over 5817.87 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,022 INFO [trainer.py:765] (0/8) Epoch 21, batch 900, train_loss[loss=3.655, NarTop10Accuracy=0.5791, over 6134.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.634, over 5830.86 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (0/8) Epoch 21, batch 1000, train_loss[loss=3.416, NarTop10Accuracy=0.6235, over 6743.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.632, over 5935.27 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 11:25:16,429 INFO [trainer.py:765] (0/8) Epoch 21, batch 1100, train_loss[loss=3.661, NarTop10Accuracy=0.5849, over 6898.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6305, over 5958.83 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,740 INFO [trainer.py:765] (0/8) Epoch 21, batch 1200, train_loss[loss=3.46, NarTop10Accuracy=0.6329, over 7371.00 frames. 
], tot_loss[loss=3.419, NarTop10Accuracy=0.6339, over 5961.73 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,057 INFO [trainer.py:765] (0/8) Epoch 21, batch 1300, train_loss[loss=3.579, NarTop10Accuracy=0.6067, over 5001.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.636, over 6025.63 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:26:49,588 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-53000.pt +2024-08-06 11:27:00,082 INFO [trainer.py:765] (0/8) Epoch 21, batch 1400, train_loss[loss=3.232, NarTop10Accuracy=0.6695, over 6063.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6335, over 6051.50 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (0/8) Epoch 21, batch 1500, train_loss[loss=3.882, NarTop10Accuracy=0.5382, over 6225.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6309, over 5980.16 frames. ], batch size: 49, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (0/8) Epoch 21, batch 1600, train_loss[loss=3.313, NarTop10Accuracy=0.6598, over 7239.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6311, over 5951.56 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (0/8) Epoch 21, batch 1700, train_loss[loss=3.691, NarTop10Accuracy=0.5762, over 6277.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6304, over 5939.83 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (0/8) Epoch 21, batch 1800, train_loss[loss=3.446, NarTop10Accuracy=0.6228, over 7209.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6303, over 6010.93 frames. ], batch size: 23, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (0/8) Epoch 21, batch 1900, train_loss[loss=3.397, NarTop10Accuracy=0.6472, over 6473.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.629, over 6035.51 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 11:29:49,028 INFO [trainer.py:765] (0/8) Epoch 21, batch 2000, train_loss[loss=3.477, NarTop10Accuracy=0.6197, over 5303.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6291, over 6017.95 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:30:14,529 INFO [trainer.py:765] (0/8) Epoch 21, batch 2100, train_loss[loss=3.358, NarTop10Accuracy=0.6465, over 4752.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6281, over 6002.26 frames. ], batch size: 5, lr: 4.04e-03 +2024-08-06 11:30:39,871 INFO [trainer.py:765] (0/8) Epoch 21, batch 2200, train_loss[loss=3.682, NarTop10Accuracy=0.5935, over 7128.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6281, over 6030.33 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (0/8) Epoch 21, batch 2300, train_loss[loss=3.389, NarTop10Accuracy=0.6355, over 5699.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6266, over 6062.14 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,873 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-54000.pt +2024-08-06 11:31:27,713 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (0/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. 
+2024-08-06 11:31:34,439 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 11:31:34,938 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,753 INFO [trainer.py:765] (0/8) Epoch 21, batch 2400, train_loss[loss=3.276, NarTop10Accuracy=0.6655, over 5193.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6271, over 5873.51 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,057 INFO [trainer.py:765] (0/8) Epoch 21, batch 2500, train_loss[loss=3.357, NarTop10Accuracy=0.6439, over 5065.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6312, over 5530.06 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,520 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:32:25,523 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-21.pt +2024-08-06 11:33:29,682 INFO [trainer.py:765] (0/8) Epoch 22, batch 100, train_loss[loss=3.721, NarTop10Accuracy=0.5773, over 7314.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6441, over 2374.95 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 11:34:05,036 INFO [trainer.py:765] (0/8) Epoch 22, batch 200, train_loss[loss=3.134, NarTop10Accuracy=0.6779, over 6791.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6454, over 3876.84 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (0/8) Epoch 22, batch 300, train_loss[loss=3.199, NarTop10Accuracy=0.6804, over 7117.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6458, over 4663.67 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,968 INFO [trainer.py:765] (0/8) Epoch 22, batch 400, train_loss[loss=3.245, NarTop10Accuracy=0.6756, over 5088.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6427, over 5124.95 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (0/8) Epoch 22, batch 500, train_loss[loss=3.457, NarTop10Accuracy=0.6376, over 6176.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6412, over 5407.32 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,058 INFO [trainer.py:765] (0/8) Epoch 22, batch 600, train_loss[loss=3.138, NarTop10Accuracy=0.7038, over 5700.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6411, over 5669.14 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (0/8) Epoch 22, batch 700, train_loss[loss=3.32, NarTop10Accuracy=0.6515, over 4899.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6391, over 5723.82 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:23,028 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-55000.pt +2024-08-06 11:37:28,480 INFO [trainer.py:765] (0/8) Epoch 22, batch 800, train_loss[loss=3.227, NarTop10Accuracy=0.6677, over 4946.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6366, over 5781.50 frames. ], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (0/8) Epoch 22, batch 900, train_loss[loss=3.187, NarTop10Accuracy=0.6772, over 6371.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6384, over 5796.06 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (0/8) Epoch 22, batch 1000, train_loss[loss=3.192, NarTop10Accuracy=0.6724, over 6231.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6371, over 5903.61 frames. 
], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,788 INFO [trainer.py:765] (0/8) Epoch 22, batch 1100, train_loss[loss=3.444, NarTop10Accuracy=0.622, over 6921.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6359, over 5938.99 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (0/8) Epoch 22, batch 1200, train_loss[loss=3.317, NarTop10Accuracy=0.6575, over 7706.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6356, over 5936.77 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (0/8) Epoch 22, batch 1300, train_loss[loss=3.002, NarTop10Accuracy=0.7141, over 4993.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6353, over 6001.06 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,610 INFO [trainer.py:765] (0/8) Epoch 22, batch 1400, train_loss[loss=3.404, NarTop10Accuracy=0.6312, over 6043.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6345, over 6011.39 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,584 INFO [trainer.py:765] (0/8) Epoch 22, batch 1500, train_loss[loss=3.663, NarTop10Accuracy=0.5872, over 6857.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.633, over 5956.42 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (0/8) Epoch 22, batch 1600, train_loss[loss=3.228, NarTop10Accuracy=0.6741, over 7293.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6306, over 5949.19 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,464 INFO [trainer.py:765] (0/8) Epoch 22, batch 1700, train_loss[loss=3.444, NarTop10Accuracy=0.6442, over 6255.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6286, over 5944.36 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,722 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-56000.pt +2024-08-06 11:42:54,181 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (0/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (0/8) Epoch 22, batch 1800, train_loss[loss=3.29, NarTop10Accuracy=0.6608, over 7368.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6306, over 6007.89 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,752 INFO [trainer.py:765] (0/8) Epoch 22, batch 1900, train_loss[loss=3.58, NarTop10Accuracy=0.6003, over 6674.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6297, over 6039.26 frames. ], batch size: 49, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (0/8) Epoch 22, batch 2000, train_loss[loss=3.787, NarTop10Accuracy=0.5563, over 6575.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6313, over 6019.97 frames. ], batch size: 49, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (0/8) Epoch 22, batch 2100, train_loss[loss=2.986, NarTop10Accuracy=0.7, over 4955.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6314, over 5990.34 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (0/8) Epoch 22, batch 2200, train_loss[loss=3.837, NarTop10Accuracy=0.5452, over 7035.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6327, over 6020.57 frames. 
], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (0/8) Epoch 22, batch 2300, train_loss[loss=3.288, NarTop10Accuracy=0.6568, over 5819.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6321, over 6060.75 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (0/8) Epoch 22, batch 2400, train_loss[loss=3.599, NarTop10Accuracy=0.6004, over 6093.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6284, over 5875.54 frames. ], batch size: 48, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (0/8) Epoch 22, batch 2500, train_loss[loss=3.13, NarTop10Accuracy=0.6867, over 5043.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 5543.89 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,449 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 11:46:21,452 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-22.pt +2024-08-06 11:47:20,476 INFO [trainer.py:765] (0/8) Epoch 23, batch 100, train_loss[loss=3.234, NarTop10Accuracy=0.6765, over 7156.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6459, over 2370.22 frames. ], batch size: 31, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (0/8) Epoch 23, batch 200, train_loss[loss=3.46, NarTop10Accuracy=0.6322, over 6843.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.649, over 3879.56 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:47:54,493 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-57000.pt +2024-08-06 11:48:33,921 INFO [trainer.py:765] (0/8) Epoch 23, batch 300, train_loss[loss=3.322, NarTop10Accuracy=0.6354, over 7175.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6452, over 4697.29 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (0/8) Epoch 23, batch 400, train_loss[loss=3.253, NarTop10Accuracy=0.6755, over 5208.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.644, over 5135.07 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,619 INFO [trainer.py:765] (0/8) Epoch 23, batch 500, train_loss[loss=3.601, NarTop10Accuracy=0.6016, over 6186.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6435, over 5424.59 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (0/8) Epoch 23, batch 600, train_loss[loss=3.617, NarTop10Accuracy=0.5901, over 5731.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6446, over 5681.99 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,601 INFO [trainer.py:765] (0/8) Epoch 23, batch 700, train_loss[loss=3.249, NarTop10Accuracy=0.6673, over 4987.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6429, over 5729.50 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (0/8) Epoch 23, batch 800, train_loss[loss=3.39, NarTop10Accuracy=0.642, over 5046.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6399, over 5784.69 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,396 INFO [trainer.py:765] (0/8) Epoch 23, batch 900, train_loss[loss=3.334, NarTop10Accuracy=0.6488, over 6143.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5803.84 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,917 INFO [trainer.py:765] (0/8) Epoch 23, batch 1000, train_loss[loss=3.172, NarTop10Accuracy=0.6754, over 6744.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6394, over 5906.80 frames. 
], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (0/8) Epoch 23, batch 1100, train_loss[loss=3.631, NarTop10Accuracy=0.5899, over 7020.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 5941.97 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (0/8) Epoch 23, batch 1200, train_loss[loss=3.434, NarTop10Accuracy=0.6299, over 7183.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6331, over 5947.61 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 11:53:42,823 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-58000.pt +2024-08-06 11:53:46,299 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (0/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (0/8) Epoch 23, batch 1300, train_loss[loss=3.343, NarTop10Accuracy=0.6414, over 4945.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6343, over 6014.42 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (0/8) Epoch 23, batch 1400, train_loss[loss=3.397, NarTop10Accuracy=0.6363, over 6184.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6344, over 6038.82 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,397 INFO [trainer.py:765] (0/8) Epoch 23, batch 1500, train_loss[loss=3.597, NarTop10Accuracy=0.6022, over 6144.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6327, over 5977.67 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (0/8) Epoch 23, batch 1600, train_loss[loss=3.509, NarTop10Accuracy=0.6345, over 7093.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6348, over 5969.81 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,202 INFO [trainer.py:765] (0/8) Epoch 23, batch 1700, train_loss[loss=3.439, NarTop10Accuracy=0.6223, over 6316.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6303, over 5953.36 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,968 INFO [trainer.py:765] (0/8) Epoch 23, batch 1800, train_loss[loss=3.366, NarTop10Accuracy=0.6486, over 7008.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.631, over 6007.52 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,596 INFO [trainer.py:765] (0/8) Epoch 23, batch 1900, train_loss[loss=3.544, NarTop10Accuracy=0.6109, over 6092.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6306, over 6049.70 frames. ], batch size: 49, lr: 3.70e-03 +2024-08-06 11:57:49,250 INFO [trainer.py:765] (0/8) Epoch 23, batch 2000, train_loss[loss=3.694, NarTop10Accuracy=0.5782, over 6383.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6312, over 6015.71 frames. ], batch size: 49, lr: 3.69e-03 +2024-08-06 11:58:14,769 INFO [trainer.py:765] (0/8) Epoch 23, batch 2100, train_loss[loss=3.686, NarTop10Accuracy=0.5756, over 3989.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6326, over 5990.69 frames. ], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (0/8) Epoch 23, batch 2200, train_loss[loss=3.721, NarTop10Accuracy=0.5829, over 7521.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6336, over 6029.78 frames. 
], batch size: 31, lr: 3.69e-03 +2024-08-06 11:58:42,492 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-59000.pt +2024-08-06 11:59:08,915 INFO [trainer.py:765] (0/8) Epoch 23, batch 2300, train_loss[loss=3.402, NarTop10Accuracy=0.632, over 5740.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6312, over 6062.11 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (0/8) Epoch 23, batch 2400, train_loss[loss=3.219, NarTop10Accuracy=0.6564, over 5134.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6292, over 5881.67 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,010 INFO [trainer.py:765] (0/8) Epoch 23, batch 2500, train_loss[loss=3.534, NarTop10Accuracy=0.6211, over 5079.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6337, over 5536.53 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:17,805 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:00:17,808 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-23.pt +2024-08-06 12:01:22,110 INFO [trainer.py:765] (0/8) Epoch 24, batch 100, train_loss[loss=3.572, NarTop10Accuracy=0.6065, over 7255.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6404, over 2376.62 frames. ], batch size: 30, lr: 3.59e-03 +2024-08-06 12:01:51,341 INFO [trainer.py:765] (0/8) Epoch 24, batch 200, train_loss[loss=3.511, NarTop10Accuracy=0.6158, over 6920.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6445, over 3866.49 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,512 INFO [trainer.py:765] (0/8) Epoch 24, batch 300, train_loss[loss=3.239, NarTop10Accuracy=0.6727, over 7072.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6457, over 4683.27 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,846 INFO [trainer.py:765] (0/8) Epoch 24, batch 400, train_loss[loss=3.071, NarTop10Accuracy=0.6993, over 5173.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6454, over 5146.66 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,256 INFO [trainer.py:765] (0/8) Epoch 24, batch 500, train_loss[loss=3.156, NarTop10Accuracy=0.6839, over 6130.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6453, over 5420.38 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,173 INFO [trainer.py:765] (0/8) Epoch 24, batch 600, train_loss[loss=3.71, NarTop10Accuracy=0.5816, over 5751.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6439, over 5687.52 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,530 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-60000.pt +2024-08-06 12:04:16,120 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (0/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,775 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:04:23,311 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (0/8) Epoch 24, batch 700, train_loss[loss=3.198, NarTop10Accuracy=0.6745, over 4920.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6426, over 5748.50 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,275 INFO [trainer.py:765] (0/8) Epoch 24, batch 800, train_loss[loss=3.489, NarTop10Accuracy=0.6297, over 5147.00 frames. 
], tot_loss[loss=3.389, NarTop10Accuracy=0.6405, over 5798.43 frames. ], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,754 INFO [trainer.py:765] (0/8) Epoch 24, batch 900, train_loss[loss=3.55, NarTop10Accuracy=0.606, over 6273.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 5806.87 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,813 INFO [trainer.py:765] (0/8) Epoch 24, batch 1000, train_loss[loss=3.109, NarTop10Accuracy=0.7055, over 6309.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6395, over 5905.81 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (0/8) Epoch 24, batch 1100, train_loss[loss=3.365, NarTop10Accuracy=0.6467, over 6800.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6381, over 5944.11 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,135 INFO [trainer.py:765] (0/8) Epoch 24, batch 1200, train_loss[loss=3.367, NarTop10Accuracy=0.6449, over 7076.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6372, over 5942.14 frames. ], batch size: 30, lr: 3.56e-03 +2024-08-06 12:08:20,732 INFO [trainer.py:765] (0/8) Epoch 24, batch 1300, train_loss[loss=2.934, NarTop10Accuracy=0.7239, over 4987.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6377, over 6006.03 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,066 INFO [trainer.py:765] (0/8) Epoch 24, batch 1400, train_loss[loss=3.23, NarTop10Accuracy=0.6597, over 6302.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6347, over 6026.26 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,343 INFO [trainer.py:765] (0/8) Epoch 24, batch 1500, train_loss[loss=3.669, NarTop10Accuracy=0.5855, over 6320.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6337, over 5961.32 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,525 INFO [trainer.py:765] (0/8) Epoch 24, batch 1600, train_loss[loss=3.292, NarTop10Accuracy=0.6592, over 7095.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.635, over 5943.75 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:00,006 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-61000.pt +2024-08-06 12:10:22,546 INFO [trainer.py:765] (0/8) Epoch 24, batch 1700, train_loss[loss=3.668, NarTop10Accuracy=0.5776, over 6313.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6328, over 5928.34 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,273 INFO [trainer.py:765] (0/8) Epoch 24, batch 1800, train_loss[loss=3.347, NarTop10Accuracy=0.6509, over 7089.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6343, over 6006.83 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,847 INFO [trainer.py:765] (0/8) Epoch 24, batch 1900, train_loss[loss=3.523, NarTop10Accuracy=0.6194, over 5422.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6335, over 6029.65 frames. ], batch size: 48, lr: 3.54e-03 +2024-08-06 12:11:41,667 INFO [trainer.py:765] (0/8) Epoch 24, batch 2000, train_loss[loss=3.4, NarTop10Accuracy=0.6412, over 6079.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6347, over 6008.55 frames. ], batch size: 48, lr: 3.54e-03 +2024-08-06 12:12:07,104 INFO [trainer.py:765] (0/8) Epoch 24, batch 2100, train_loss[loss=3.2, NarTop10Accuracy=0.6913, over 3955.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6359, over 5987.42 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 12:12:33,373 INFO [trainer.py:765] (0/8) Epoch 24, batch 2200, train_loss[loss=3.484, NarTop10Accuracy=0.6189, over 7429.00 frames. 
], tot_loss[loss=3.409, NarTop10Accuracy=0.6357, over 6032.35 frames. ], batch size: 31, lr: 3.53e-03 +2024-08-06 12:12:58,773 INFO [trainer.py:765] (0/8) Epoch 24, batch 2300, train_loss[loss=3.661, NarTop10Accuracy=0.5825, over 5726.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6317, over 6060.25 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,488 INFO [trainer.py:765] (0/8) Epoch 24, batch 2400, train_loss[loss=3.602, NarTop10Accuracy=0.5975, over 6204.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6301, over 5869.15 frames. ], batch size: 49, lr: 3.53e-03 +2024-08-06 12:13:47,006 INFO [trainer.py:765] (0/8) Epoch 24, batch 2500, train_loss[loss=3.473, NarTop10Accuracy=0.6325, over 4344.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6365, over 5518.77 frames. ], batch size: 5, lr: 3.52e-03 +2024-08-06 12:14:08,384 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:14:08,388 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-24.pt +2024-08-06 12:14:50,196 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-62000.pt +2024-08-06 12:14:53,735 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (0/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (0/8) Epoch 25, batch 100, train_loss[loss=3.175, NarTop10Accuracy=0.682, over 7596.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6472, over 2368.20 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (0/8) Epoch 25, batch 200, train_loss[loss=3.254, NarTop10Accuracy=0.6574, over 6736.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6468, over 3866.68 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,596 INFO [trainer.py:765] (0/8) Epoch 25, batch 300, train_loss[loss=3.413, NarTop10Accuracy=0.6361, over 7201.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6466, over 4674.95 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (0/8) Epoch 25, batch 400, train_loss[loss=3.491, NarTop10Accuracy=0.6199, over 5014.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.646, over 5145.83 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (0/8) Epoch 25, batch 500, train_loss[loss=3.289, NarTop10Accuracy=0.6564, over 6148.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 5416.46 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (0/8) Epoch 25, batch 600, train_loss[loss=3.236, NarTop10Accuracy=0.6757, over 5904.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 5679.35 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,598 INFO [trainer.py:765] (0/8) Epoch 25, batch 700, train_loss[loss=3.18, NarTop10Accuracy=0.6864, over 5075.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6465, over 5740.40 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,015 INFO [trainer.py:765] (0/8) Epoch 25, batch 800, train_loss[loss=3.039, NarTop10Accuracy=0.7044, over 5054.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6444, over 5813.15 frames. 
], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (0/8) Epoch 25, batch 900, train_loss[loss=3.128, NarTop10Accuracy=0.6871, over 6783.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6419, over 5842.67 frames. ], batch size: 14, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (0/8) Epoch 25, batch 1000, train_loss[loss=3.291, NarTop10Accuracy=0.6531, over 6567.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5923.10 frames. ], batch size: 14, lr: 3.42e-03 +2024-08-06 12:20:40,427 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-63000.pt +2024-08-06 12:21:01,915 INFO [trainer.py:765] (0/8) Epoch 25, batch 1100, train_loss[loss=3.252, NarTop10Accuracy=0.6679, over 6903.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5951.97 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,638 INFO [trainer.py:765] (0/8) Epoch 25, batch 1200, train_loss[loss=3.361, NarTop10Accuracy=0.6471, over 7144.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6416, over 5942.89 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,838 INFO [trainer.py:765] (0/8) Epoch 25, batch 1300, train_loss[loss=3.506, NarTop10Accuracy=0.6173, over 5256.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6416, over 6008.06 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (0/8) Epoch 25, batch 1400, train_loss[loss=3.675, NarTop10Accuracy=0.5732, over 6245.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6399, over 6023.47 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (0/8) Epoch 25, batch 1500, train_loss[loss=3.794, NarTop10Accuracy=0.5587, over 6209.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6391, over 5957.19 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (0/8) Epoch 25, batch 1600, train_loss[loss=3.219, NarTop10Accuracy=0.6739, over 7142.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6372, over 5943.12 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,373 INFO [trainer.py:765] (0/8) Epoch 25, batch 1700, train_loss[loss=3.579, NarTop10Accuracy=0.5962, over 6340.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6343, over 5929.72 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (0/8) Epoch 25, batch 1800, train_loss[loss=3.304, NarTop10Accuracy=0.6572, over 7249.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6346, over 5992.52 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (0/8) Epoch 25, batch 1900, train_loss[loss=3.558, NarTop10Accuracy=0.6057, over 5963.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6301, over 6027.94 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (0/8) Epoch 25, batch 2000, train_loss[loss=3.551, NarTop10Accuracy=0.6098, over 6046.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6356, over 6021.79 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-64000.pt +2024-08-06 12:25:52,168 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (0/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. 
+2024-08-06 12:25:58,847 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:25:59,344 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (0/8) Epoch 25, batch 2100, train_loss[loss=3.59, NarTop10Accuracy=0.6045, over 3937.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 5997.51 frames. ], batch size: 4, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (0/8) Epoch 25, batch 2200, train_loss[loss=3.536, NarTop10Accuracy=0.6127, over 7316.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6367, over 6036.51 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (0/8) Epoch 25, batch 2300, train_loss[loss=3.411, NarTop10Accuracy=0.6307, over 5956.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6351, over 6062.71 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,150 INFO [trainer.py:765] (0/8) Epoch 25, batch 2400, train_loss[loss=3.548, NarTop10Accuracy=0.6011, over 6181.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6326, over 5877.93 frames. ], batch size: 50, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (0/8) Epoch 25, batch 2500, train_loss[loss=3.349, NarTop10Accuracy=0.6482, over 4943.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6365, over 5535.07 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:12,990 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:28:12,992 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-25.pt +2024-08-06 12:29:08,881 INFO [trainer.py:765] (0/8) Epoch 26, batch 100, train_loss[loss=3.59, NarTop10Accuracy=0.6065, over 7072.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6519, over 2370.30 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,319 INFO [trainer.py:765] (0/8) Epoch 26, batch 200, train_loss[loss=3.155, NarTop10Accuracy=0.6819, over 6964.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6495, over 3865.67 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (0/8) Epoch 26, batch 300, train_loss[loss=3.222, NarTop10Accuracy=0.6702, over 6967.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 4666.91 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,510 INFO [trainer.py:765] (0/8) Epoch 26, batch 400, train_loss[loss=3.031, NarTop10Accuracy=0.7061, over 5159.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6483, over 5128.15 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:11,050 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-65000.pt +2024-08-06 12:31:26,531 INFO [trainer.py:765] (0/8) Epoch 26, batch 500, train_loss[loss=3.356, NarTop10Accuracy=0.6521, over 6111.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6479, over 5416.03 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (0/8) Epoch 26, batch 600, train_loss[loss=3.536, NarTop10Accuracy=0.6105, over 5730.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.645, over 5673.50 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,967 INFO [trainer.py:765] (0/8) Epoch 26, batch 700, train_loss[loss=3.292, NarTop10Accuracy=0.6623, over 5004.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6444, over 5741.30 frames. 
], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (0/8) Epoch 26, batch 800, train_loss[loss=3.399, NarTop10Accuracy=0.6487, over 5086.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6434, over 5806.52 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (0/8) Epoch 26, batch 900, train_loss[loss=3.387, NarTop10Accuracy=0.6358, over 6281.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6403, over 5802.97 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (0/8) Epoch 26, batch 1000, train_loss[loss=3.246, NarTop10Accuracy=0.6637, over 6331.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6406, over 5916.79 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (0/8) Epoch 26, batch 1100, train_loss[loss=3.29, NarTop10Accuracy=0.6586, over 6893.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6419, over 5958.16 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (0/8) Epoch 26, batch 1200, train_loss[loss=3.245, NarTop10Accuracy=0.6622, over 7381.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6436, over 5952.05 frames. ], batch size: 31, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (0/8) Epoch 26, batch 1300, train_loss[loss=3.382, NarTop10Accuracy=0.6373, over 5191.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 6024.13 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (0/8) Epoch 26, batch 1400, train_loss[loss=3.28, NarTop10Accuracy=0.6603, over 6104.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6414, over 6033.83 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,593 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-66000.pt +2024-08-06 12:37:06,979 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (0/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (0/8) Epoch 26, batch 1500, train_loss[loss=3.621, NarTop10Accuracy=0.5993, over 5962.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6423, over 5969.17 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,061 INFO [trainer.py:765] (0/8) Epoch 26, batch 1600, train_loss[loss=3.307, NarTop10Accuracy=0.6505, over 7184.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6433, over 5964.51 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,854 INFO [trainer.py:765] (0/8) Epoch 26, batch 1700, train_loss[loss=3.381, NarTop10Accuracy=0.6425, over 6215.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6389, over 5949.93 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (0/8) Epoch 26, batch 1800, train_loss[loss=3.127, NarTop10Accuracy=0.687, over 7197.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6381, over 6010.34 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (0/8) Epoch 26, batch 1900, train_loss[loss=3.663, NarTop10Accuracy=0.5815, over 5950.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6355, over 6058.80 frames. 
], batch size: 49, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (0/8) Epoch 26, batch 2000, train_loss[loss=3.537, NarTop10Accuracy=0.6101, over 5854.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6353, over 6004.51 frames. ], batch size: 49, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (0/8) Epoch 26, batch 2100, train_loss[loss=3.054, NarTop10Accuracy=0.6994, over 4798.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5977.94 frames. ], batch size: 5, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (0/8) Epoch 26, batch 2200, train_loss[loss=3.341, NarTop10Accuracy=0.6507, over 7283.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.638, over 6034.43 frames. ], batch size: 30, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (0/8) Epoch 26, batch 2300, train_loss[loss=3.278, NarTop10Accuracy=0.6696, over 5823.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6362, over 6069.50 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (0/8) Epoch 26, batch 2400, train_loss[loss=3.386, NarTop10Accuracy=0.6408, over 6094.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6347, over 5873.29 frames. ], batch size: 49, lr: 3.25e-03 +2024-08-06 12:41:33,149 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-67000.pt +2024-08-06 12:41:44,478 INFO [trainer.py:765] (0/8) Epoch 26, batch 2500, train_loss[loss=3.235, NarTop10Accuracy=0.6787, over 5212.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6404, over 5526.74 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,393 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:42:05,396 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-26.pt +2024-08-06 12:43:12,533 INFO [trainer.py:765] (0/8) Epoch 27, batch 100, train_loss[loss=3.536, NarTop10Accuracy=0.6073, over 7347.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6496, over 2374.12 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 12:43:43,576 INFO [trainer.py:765] (0/8) Epoch 27, batch 200, train_loss[loss=3.621, NarTop10Accuracy=0.5893, over 6757.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6498, over 3870.67 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (0/8) Epoch 27, batch 300, train_loss[loss=3.136, NarTop10Accuracy=0.6928, over 7135.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6524, over 4682.63 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,461 INFO [trainer.py:765] (0/8) Epoch 27, batch 400, train_loss[loss=3.169, NarTop10Accuracy=0.688, over 5094.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6523, over 5141.44 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,670 INFO [trainer.py:765] (0/8) Epoch 27, batch 500, train_loss[loss=3.283, NarTop10Accuracy=0.6601, over 6126.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6521, over 5411.59 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,260 INFO [trainer.py:765] (0/8) Epoch 27, batch 600, train_loss[loss=3.255, NarTop10Accuracy=0.6731, over 5782.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6489, over 5676.63 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (0/8) Epoch 27, batch 700, train_loss[loss=3.567, NarTop10Accuracy=0.6038, over 4876.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6471, over 5744.84 frames. 
], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,016 INFO [trainer.py:765] (0/8) Epoch 27, batch 800, train_loss[loss=3.213, NarTop10Accuracy=0.677, over 5124.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6459, over 5805.60 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,741 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-68000.pt +2024-08-06 12:47:36,212 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (0/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,259 INFO [trainer.py:765] (0/8) Epoch 27, batch 900, train_loss[loss=3.36, NarTop10Accuracy=0.6528, over 6199.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6458, over 5807.90 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,862 INFO [trainer.py:765] (0/8) Epoch 27, batch 1000, train_loss[loss=3.252, NarTop10Accuracy=0.6626, over 6310.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6453, over 5919.09 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,085 INFO [trainer.py:765] (0/8) Epoch 27, batch 1100, train_loss[loss=3.666, NarTop10Accuracy=0.5907, over 6960.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6436, over 5955.92 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (0/8) Epoch 27, batch 1200, train_loss[loss=3.218, NarTop10Accuracy=0.6802, over 7397.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.643, over 5939.12 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 12:50:06,242 INFO [trainer.py:765] (0/8) Epoch 27, batch 1300, train_loss[loss=3.099, NarTop10Accuracy=0.6923, over 5115.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 6005.58 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,951 INFO [trainer.py:765] (0/8) Epoch 27, batch 1400, train_loss[loss=3.086, NarTop10Accuracy=0.7041, over 6169.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6409, over 6026.90 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,278 INFO [trainer.py:765] (0/8) Epoch 27, batch 1500, train_loss[loss=3.414, NarTop10Accuracy=0.6303, over 6381.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6411, over 5973.28 frames. ], batch size: 49, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (0/8) Epoch 27, batch 1600, train_loss[loss=3.289, NarTop10Accuracy=0.6561, over 7102.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6398, over 5952.38 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,062 INFO [trainer.py:765] (0/8) Epoch 27, batch 1700, train_loss[loss=3.73, NarTop10Accuracy=0.5806, over 6210.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6392, over 5958.37 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,669 INFO [trainer.py:765] (0/8) Epoch 27, batch 1800, train_loss[loss=3.345, NarTop10Accuracy=0.6567, over 7270.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 6021.99 frames. 
], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:55,214 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-69000.pt +2024-08-06 12:53:02,289 INFO [trainer.py:765] (0/8) Epoch 27, batch 1900, train_loss[loss=3.648, NarTop10Accuracy=0.5913, over 6404.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.637, over 6047.65 frames. ], batch size: 50, lr: 3.14e-03 +2024-08-06 12:53:27,998 INFO [trainer.py:765] (0/8) Epoch 27, batch 2000, train_loss[loss=3.478, NarTop10Accuracy=0.6231, over 5784.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.638, over 6027.85 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:53,538 INFO [trainer.py:765] (0/8) Epoch 27, batch 2100, train_loss[loss=3.719, NarTop10Accuracy=0.5732, over 4827.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.637, over 6006.87 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (0/8) Epoch 27, batch 2200, train_loss[loss=3.415, NarTop10Accuracy=0.6363, over 7208.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6381, over 6044.03 frames. ], batch size: 30, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (0/8) Epoch 27, batch 2300, train_loss[loss=3.345, NarTop10Accuracy=0.6566, over 5865.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 6068.95 frames. ], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (0/8) Epoch 27, batch 2400, train_loss[loss=3.455, NarTop10Accuracy=0.6234, over 5887.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6329, over 5893.94 frames. ], batch size: 48, lr: 3.13e-03 +2024-08-06 12:55:32,726 INFO [trainer.py:765] (0/8) Epoch 27, batch 2500, train_loss[loss=3.173, NarTop10Accuracy=0.6833, over 5136.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6399, over 5549.09 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,286 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 12:55:54,288 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-27.pt +2024-08-06 12:56:46,803 INFO [trainer.py:765] (0/8) Epoch 28, batch 100, train_loss[loss=3.218, NarTop10Accuracy=0.6725, over 7231.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 2368.27 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,205 INFO [trainer.py:765] (0/8) Epoch 28, batch 200, train_loss[loss=3.393, NarTop10Accuracy=0.6472, over 6872.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6523, over 3865.33 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (0/8) Epoch 28, batch 300, train_loss[loss=3.343, NarTop10Accuracy=0.6469, over 7150.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6497, over 4667.55 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,456 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-70000.pt +2024-08-06 12:58:00,124 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (0/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 12:58:07,333 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (0/8) Epoch 28, batch 400, train_loss[loss=3.41, NarTop10Accuracy=0.6251, over 5131.00 frames. 
], tot_loss[loss=3.356, NarTop10Accuracy=0.6483, over 5128.33 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,437 INFO [trainer.py:765] (0/8) Epoch 28, batch 500, train_loss[loss=3.212, NarTop10Accuracy=0.6827, over 6254.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6499, over 5403.59 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (0/8) Epoch 28, batch 600, train_loss[loss=3.206, NarTop10Accuracy=0.6761, over 5758.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6501, over 5661.77 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,012 INFO [trainer.py:765] (0/8) Epoch 28, batch 700, train_loss[loss=3.245, NarTop10Accuracy=0.6625, over 5211.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6487, over 5736.47 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,433 INFO [trainer.py:765] (0/8) Epoch 28, batch 800, train_loss[loss=3.408, NarTop10Accuracy=0.6372, over 5134.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6495, over 5798.47 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (0/8) Epoch 28, batch 900, train_loss[loss=3.311, NarTop10Accuracy=0.6647, over 6239.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6473, over 5812.26 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:06,494 INFO [trainer.py:765] (0/8) Epoch 28, batch 1000, train_loss[loss=3.462, NarTop10Accuracy=0.6203, over 6336.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6442, over 5918.42 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (0/8) Epoch 28, batch 1100, train_loss[loss=3.334, NarTop10Accuracy=0.6502, over 6863.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6437, over 5937.21 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (0/8) Epoch 28, batch 1200, train_loss[loss=3.52, NarTop10Accuracy=0.6205, over 7466.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6414, over 5940.39 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 13:03:54,153 INFO [trainer.py:765] (0/8) Epoch 28, batch 1300, train_loss[loss=3.371, NarTop10Accuracy=0.6456, over 5117.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6415, over 5998.56 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:03:54,981 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-71000.pt +2024-08-06 13:04:28,712 INFO [trainer.py:765] (0/8) Epoch 28, batch 1400, train_loss[loss=3.595, NarTop10Accuracy=0.6051, over 6283.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6393, over 6033.16 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,349 INFO [trainer.py:765] (0/8) Epoch 28, batch 1500, train_loss[loss=3.413, NarTop10Accuracy=0.6369, over 6064.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 5984.40 frames. ], batch size: 48, lr: 3.04e-03 +2024-08-06 13:05:30,371 INFO [trainer.py:765] (0/8) Epoch 28, batch 1600, train_loss[loss=3.529, NarTop10Accuracy=0.6094, over 7062.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5954.51 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,131 INFO [trainer.py:765] (0/8) Epoch 28, batch 1700, train_loss[loss=3.598, NarTop10Accuracy=0.5925, over 6278.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6401, over 5934.82 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (0/8) Epoch 28, batch 1800, train_loss[loss=3.602, NarTop10Accuracy=0.5948, over 7264.00 frames. 
], tot_loss[loss=3.377, NarTop10Accuracy=0.6427, over 6007.83 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (0/8) Epoch 28, batch 1900, train_loss[loss=3.477, NarTop10Accuracy=0.6202, over 5347.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 6038.12 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (0/8) Epoch 28, batch 2000, train_loss[loss=3.546, NarTop10Accuracy=0.6063, over 5795.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6418, over 6029.68 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:41,546 INFO [trainer.py:765] (0/8) Epoch 28, batch 2100, train_loss[loss=3.361, NarTop10Accuracy=0.6198, over 4699.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6395, over 6011.65 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (0/8) Epoch 28, batch 2200, train_loss[loss=3.502, NarTop10Accuracy=0.611, over 7304.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6384, over 6047.96 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (0/8) Epoch 28, batch 2300, train_loss[loss=3.217, NarTop10Accuracy=0.6891, over 5627.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 6063.71 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,134 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-72000.pt +2024-08-06 13:08:36,743 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (0/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. +2024-08-06 13:08:43,385 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,390 INFO [trainer.py:765] (0/8) Epoch 28, batch 2400, train_loss[loss=3.849, NarTop10Accuracy=0.5492, over 5564.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6347, over 5876.76 frames. ], batch size: 49, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (0/8) Epoch 28, batch 2500, train_loss[loss=3.664, NarTop10Accuracy=0.5849, over 4990.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6392, over 5546.55 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:51,804 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:09:51,807 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-28.pt +2024-08-06 13:10:48,192 INFO [trainer.py:765] (0/8) Epoch 29, batch 100, train_loss[loss=3.61, NarTop10Accuracy=0.5972, over 7216.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6522, over 2371.98 frames. ], batch size: 30, lr: 2.96e-03 +2024-08-06 13:11:20,840 INFO [trainer.py:765] (0/8) Epoch 29, batch 200, train_loss[loss=3.525, NarTop10Accuracy=0.6256, over 7002.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6519, over 3879.15 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,949 INFO [trainer.py:765] (0/8) Epoch 29, batch 300, train_loss[loss=3.156, NarTop10Accuracy=0.6906, over 7269.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6532, over 4668.84 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (0/8) Epoch 29, batch 400, train_loss[loss=3.11, NarTop10Accuracy=0.6906, over 5207.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6528, over 5121.79 frames. 
], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,920 INFO [trainer.py:765] (0/8) Epoch 29, batch 500, train_loss[loss=3.421, NarTop10Accuracy=0.6339, over 6243.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6527, over 5399.26 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,546 INFO [trainer.py:765] (0/8) Epoch 29, batch 600, train_loss[loss=3.518, NarTop10Accuracy=0.5953, over 5719.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6506, over 5665.15 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,936 INFO [trainer.py:765] (0/8) Epoch 29, batch 700, train_loss[loss=3.457, NarTop10Accuracy=0.6255, over 5019.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6487, over 5740.43 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:16,395 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-73000.pt +2024-08-06 13:14:46,677 INFO [trainer.py:765] (0/8) Epoch 29, batch 800, train_loss[loss=3.469, NarTop10Accuracy=0.6296, over 5239.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6441, over 5801.84 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,113 INFO [trainer.py:765] (0/8) Epoch 29, batch 900, train_loss[loss=3.446, NarTop10Accuracy=0.6296, over 6111.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 5802.20 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (0/8) Epoch 29, batch 1000, train_loss[loss=3.64, NarTop10Accuracy=0.5853, over 6269.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6436, over 5907.58 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,712 INFO [trainer.py:765] (0/8) Epoch 29, batch 1100, train_loss[loss=3.34, NarTop10Accuracy=0.6505, over 6731.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6416, over 5971.35 frames. ], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,933 INFO [trainer.py:765] (0/8) Epoch 29, batch 1200, train_loss[loss=3.482, NarTop10Accuracy=0.6249, over 7012.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6427, over 5966.53 frames. ], batch size: 30, lr: 2.94e-03 +2024-08-06 13:17:43,956 INFO [trainer.py:765] (0/8) Epoch 29, batch 1300, train_loss[loss=3.261, NarTop10Accuracy=0.669, over 5009.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6431, over 6026.85 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,924 INFO [trainer.py:765] (0/8) Epoch 29, batch 1400, train_loss[loss=3.524, NarTop10Accuracy=0.6131, over 6297.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6393, over 6042.53 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,305 INFO [trainer.py:765] (0/8) Epoch 29, batch 1500, train_loss[loss=3.783, NarTop10Accuracy=0.5661, over 5946.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6409, over 5961.34 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:19:16,408 INFO [trainer.py:765] (0/8) Epoch 29, batch 1600, train_loss[loss=3.271, NarTop10Accuracy=0.6591, over 7216.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6415, over 5957.26 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,241 INFO [trainer.py:765] (0/8) Epoch 29, batch 1700, train_loss[loss=3.217, NarTop10Accuracy=0.669, over 6759.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6411, over 5942.62 frames. 
], batch size: 14, lr: 2.93e-03 +2024-08-06 13:19:49,090 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-74000.pt +2024-08-06 13:19:52,597 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (0/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,386 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,108 INFO [trainer.py:765] (0/8) Epoch 29, batch 1800, train_loss[loss=3.299, NarTop10Accuracy=0.6521, over 7206.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6401, over 5987.09 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,844 INFO [trainer.py:765] (0/8) Epoch 29, batch 1900, train_loss[loss=3.56, NarTop10Accuracy=0.6107, over 6174.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 6030.33 frames. ], batch size: 48, lr: 2.93e-03 +2024-08-06 13:21:12,478 INFO [trainer.py:765] (0/8) Epoch 29, batch 2000, train_loss[loss=3.657, NarTop10Accuracy=0.5861, over 5860.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6385, over 5998.15 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:21:37,982 INFO [trainer.py:765] (0/8) Epoch 29, batch 2100, train_loss[loss=3.289, NarTop10Accuracy=0.6753, over 4004.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.639, over 5990.77 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,360 INFO [trainer.py:765] (0/8) Epoch 29, batch 2200, train_loss[loss=3.292, NarTop10Accuracy=0.6701, over 7471.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6392, over 6034.83 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 13:22:28,831 INFO [trainer.py:765] (0/8) Epoch 29, batch 2300, train_loss[loss=3.28, NarTop10Accuracy=0.6599, over 5783.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6381, over 6066.73 frames. ], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,620 INFO [trainer.py:765] (0/8) Epoch 29, batch 2400, train_loss[loss=3.759, NarTop10Accuracy=0.5657, over 6202.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6379, over 5884.63 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:23:16,978 INFO [trainer.py:765] (0/8) Epoch 29, batch 2500, train_loss[loss=3.107, NarTop10Accuracy=0.6791, over 5033.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6407, over 5532.59 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:37,887 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:23:37,889 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-29.pt +2024-08-06 13:24:38,391 INFO [trainer.py:765] (0/8) Epoch 30, batch 100, train_loss[loss=3.373, NarTop10Accuracy=0.6403, over 7274.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6612, over 2358.53 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:24:52,129 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-75000.pt +2024-08-06 13:25:14,782 INFO [trainer.py:765] (0/8) Epoch 30, batch 200, train_loss[loss=3.167, NarTop10Accuracy=0.7004, over 6963.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6598, over 3859.04 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (0/8) Epoch 30, batch 300, train_loss[loss=3.177, NarTop10Accuracy=0.6901, over 6997.00 frames. 
], tot_loss[loss=3.311, NarTop10Accuracy=0.6569, over 4677.16 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,539 INFO [trainer.py:765] (0/8) Epoch 30, batch 400, train_loss[loss=3.432, NarTop10Accuracy=0.6393, over 5031.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6544, over 5131.72 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,920 INFO [trainer.py:765] (0/8) Epoch 30, batch 500, train_loss[loss=3.217, NarTop10Accuracy=0.6696, over 6207.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6579, over 5403.46 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (0/8) Epoch 30, batch 600, train_loss[loss=3.21, NarTop10Accuracy=0.6752, over 5775.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6554, over 5661.83 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (0/8) Epoch 30, batch 700, train_loss[loss=3.39, NarTop10Accuracy=0.6341, over 4982.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6521, over 5732.58 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (0/8) Epoch 30, batch 800, train_loss[loss=3.515, NarTop10Accuracy=0.6169, over 5192.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6493, over 5779.40 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,425 INFO [trainer.py:765] (0/8) Epoch 30, batch 900, train_loss[loss=3.428, NarTop10Accuracy=0.6303, over 6222.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6469, over 5813.53 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 13:29:45,914 INFO [trainer.py:765] (0/8) Epoch 30, batch 1000, train_loss[loss=3.302, NarTop10Accuracy=0.6512, over 6352.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6435, over 5916.28 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,172 INFO [trainer.py:765] (0/8) Epoch 30, batch 1100, train_loss[loss=3.332, NarTop10Accuracy=0.648, over 6998.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6425, over 5959.32 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,001 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-76000.pt +2024-08-06 13:30:41,468 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (0/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. +2024-08-06 13:30:48,196 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (0/8) Epoch 30, batch 1200, train_loss[loss=3.404, NarTop10Accuracy=0.6366, over 7133.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6434, over 5958.78 frames. ], batch size: 30, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (0/8) Epoch 30, batch 1300, train_loss[loss=3.28, NarTop10Accuracy=0.6689, over 4995.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.646, over 6021.09 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,324 INFO [trainer.py:765] (0/8) Epoch 30, batch 1400, train_loss[loss=3.536, NarTop10Accuracy=0.6088, over 6129.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6424, over 6043.05 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (0/8) Epoch 30, batch 1500, train_loss[loss=3.535, NarTop10Accuracy=0.6093, over 5704.00 frames. 
], tot_loss[loss=3.379, NarTop10Accuracy=0.6429, over 5970.65 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (0/8) Epoch 30, batch 1600, train_loss[loss=3.806, NarTop10Accuracy=0.5546, over 7032.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5943.57 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (0/8) Epoch 30, batch 1700, train_loss[loss=3.47, NarTop10Accuracy=0.619, over 6326.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6393, over 5944.98 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 13:34:13,886 INFO [trainer.py:765] (0/8) Epoch 30, batch 1800, train_loss[loss=3.727, NarTop10Accuracy=0.5606, over 7158.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6418, over 6018.83 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (0/8) Epoch 30, batch 1900, train_loss[loss=3.497, NarTop10Accuracy=0.6177, over 6231.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6403, over 6045.56 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (0/8) Epoch 30, batch 2000, train_loss[loss=3.656, NarTop10Accuracy=0.5895, over 6010.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.643, over 6006.44 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:31,871 INFO [trainer.py:765] (0/8) Epoch 30, batch 2100, train_loss[loss=3.396, NarTop10Accuracy=0.6323, over 3913.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6404, over 5975.21 frames. ], batch size: 4, lr: 2.82e-03 +2024-08-06 13:35:42,323 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-77000.pt +2024-08-06 13:36:00,553 INFO [trainer.py:765] (0/8) Epoch 30, batch 2200, train_loss[loss=3.552, NarTop10Accuracy=0.6127, over 7269.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6398, over 6027.09 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (0/8) Epoch 30, batch 2300, train_loss[loss=3.849, NarTop10Accuracy=0.5574, over 5773.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.638, over 6065.11 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (0/8) Epoch 30, batch 2400, train_loss[loss=3.668, NarTop10Accuracy=0.5916, over 5617.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6371, over 5888.91 frames. ], batch size: 50, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (0/8) Epoch 30, batch 2500, train_loss[loss=3.215, NarTop10Accuracy=0.6699, over 5173.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6424, over 5545.35 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:36,084 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:37:36,087 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-30.pt +2024-08-06 13:38:28,438 INFO [trainer.py:765] (0/8) Epoch 31, batch 100, train_loss[loss=3.081, NarTop10Accuracy=0.698, over 7255.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.659, over 2371.67 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (0/8) Epoch 31, batch 200, train_loss[loss=3.131, NarTop10Accuracy=0.6873, over 6922.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.657, over 3872.76 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (0/8) Epoch 31, batch 300, train_loss[loss=3.221, NarTop10Accuracy=0.6723, over 7202.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.658, over 4680.89 frames. 
], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (0/8) Epoch 31, batch 400, train_loss[loss=3.433, NarTop10Accuracy=0.6382, over 5711.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6528, over 5142.12 frames. ], batch size: 8, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (0/8) Epoch 31, batch 500, train_loss[loss=3.292, NarTop10Accuracy=0.6587, over 6047.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6543, over 5413.09 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,297 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-78000.pt +2024-08-06 13:41:01,893 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (0/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,862 INFO [trainer.py:765] (0/8) Epoch 31, batch 600, train_loss[loss=3.143, NarTop10Accuracy=0.6869, over 5777.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6513, over 5678.24 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,259 INFO [trainer.py:765] (0/8) Epoch 31, batch 700, train_loss[loss=3.274, NarTop10Accuracy=0.6731, over 5052.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6508, over 5752.80 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (0/8) Epoch 31, batch 800, train_loss[loss=3.325, NarTop10Accuracy=0.6583, over 5155.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 5805.86 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (0/8) Epoch 31, batch 900, train_loss[loss=3.313, NarTop10Accuracy=0.6536, over 6202.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6519, over 5830.27 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (0/8) Epoch 31, batch 1000, train_loss[loss=3.103, NarTop10Accuracy=0.7097, over 6138.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6503, over 5939.40 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (0/8) Epoch 31, batch 1100, train_loss[loss=3.409, NarTop10Accuracy=0.636, over 6922.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6492, over 5956.71 frames. ], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (0/8) Epoch 31, batch 1200, train_loss[loss=3.313, NarTop10Accuracy=0.6565, over 7196.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6476, over 5956.01 frames. ], batch size: 30, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (0/8) Epoch 31, batch 1300, train_loss[loss=3.16, NarTop10Accuracy=0.6771, over 5039.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6475, over 6016.19 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,740 INFO [trainer.py:765] (0/8) Epoch 31, batch 1400, train_loss[loss=3.245, NarTop10Accuracy=0.6683, over 6126.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6461, over 6038.19 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (0/8) Epoch 31, batch 1500, train_loss[loss=3.503, NarTop10Accuracy=0.6123, over 6260.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6475, over 5955.11 frames. 
], batch size: 49, lr: 2.74e-03 +2024-08-06 13:46:50,119 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-79000.pt +2024-08-06 13:47:04,657 INFO [trainer.py:765] (0/8) Epoch 31, batch 1600, train_loss[loss=3.198, NarTop10Accuracy=0.6743, over 7258.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6454, over 5953.78 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,423 INFO [trainer.py:765] (0/8) Epoch 31, batch 1700, train_loss[loss=3.589, NarTop10Accuracy=0.6061, over 6654.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6446, over 5946.60 frames. ], batch size: 14, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (0/8) Epoch 31, batch 1800, train_loss[loss=3.614, NarTop10Accuracy=0.5922, over 7035.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 6008.46 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,577 INFO [trainer.py:765] (0/8) Epoch 31, batch 1900, train_loss[loss=3.274, NarTop10Accuracy=0.6572, over 5982.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 6056.28 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:48:50,257 INFO [trainer.py:765] (0/8) Epoch 31, batch 2000, train_loss[loss=3.645, NarTop10Accuracy=0.5836, over 6010.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6433, over 6035.63 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,765 INFO [trainer.py:765] (0/8) Epoch 31, batch 2100, train_loss[loss=3.336, NarTop10Accuracy=0.643, over 4814.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6452, over 6017.89 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (0/8) Epoch 31, batch 2200, train_loss[loss=3.299, NarTop10Accuracy=0.6546, over 7317.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6453, over 6038.24 frames. ], batch size: 30, lr: 2.73e-03 +2024-08-06 13:50:06,707 INFO [trainer.py:765] (0/8) Epoch 31, batch 2300, train_loss[loss=3.318, NarTop10Accuracy=0.6632, over 5769.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6413, over 6067.31 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,393 INFO [trainer.py:765] (0/8) Epoch 31, batch 2400, train_loss[loss=3.517, NarTop10Accuracy=0.6156, over 5649.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6369, over 5890.54 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:50:54,892 INFO [trainer.py:765] (0/8) Epoch 31, batch 2500, train_loss[loss=3.382, NarTop10Accuracy=0.6391, over 4361.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6428, over 5525.60 frames. ], batch size: 5, lr: 2.72e-03 +2024-08-06 13:51:08,995 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-80000.pt +2024-08-06 13:51:12,534 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 13:51:19,070 INFO [trainer.py:811] (0/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 13:51:19,539 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,773 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 13:51:26,776 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-31.pt +2024-08-06 13:52:19,910 INFO [trainer.py:765] (0/8) Epoch 32, batch 100, train_loss[loss=3.27, NarTop10Accuracy=0.667, over 7073.00 frames. 
], tot_loss[loss=3.34, NarTop10Accuracy=0.6524, over 2357.15 frames. ], batch size: 30, lr: 2.68e-03 +2024-08-06 13:52:52,538 INFO [trainer.py:765] (0/8) Epoch 32, batch 200, train_loss[loss=3.751, NarTop10Accuracy=0.5684, over 6905.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6529, over 3866.95 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (0/8) Epoch 32, batch 300, train_loss[loss=3.333, NarTop10Accuracy=0.6497, over 7083.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6538, over 4686.40 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,887 INFO [trainer.py:765] (0/8) Epoch 32, batch 400, train_loss[loss=3.498, NarTop10Accuracy=0.6127, over 5113.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6542, over 5128.34 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,821 INFO [trainer.py:765] (0/8) Epoch 32, batch 500, train_loss[loss=2.972, NarTop10Accuracy=0.7196, over 6101.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6569, over 5405.33 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,772 INFO [trainer.py:765] (0/8) Epoch 32, batch 600, train_loss[loss=3.59, NarTop10Accuracy=0.6083, over 5757.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6568, over 5673.13 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,511 INFO [trainer.py:765] (0/8) Epoch 32, batch 700, train_loss[loss=3.018, NarTop10Accuracy=0.7216, over 5086.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6527, over 5732.10 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,172 INFO [trainer.py:765] (0/8) Epoch 32, batch 800, train_loss[loss=3.028, NarTop10Accuracy=0.719, over 4998.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6526, over 5799.49 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,166 INFO [trainer.py:765] (0/8) Epoch 32, batch 900, train_loss[loss=3.64, NarTop10Accuracy=0.588, over 6310.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6526, over 5832.17 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:12,551 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-81000.pt +2024-08-06 13:57:24,520 INFO [trainer.py:765] (0/8) Epoch 32, batch 1000, train_loss[loss=3.593, NarTop10Accuracy=0.5952, over 6351.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6511, over 5922.57 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (0/8) Epoch 32, batch 1100, train_loss[loss=3.112, NarTop10Accuracy=0.6984, over 6865.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6479, over 5969.41 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,541 INFO [trainer.py:765] (0/8) Epoch 32, batch 1200, train_loss[loss=3.105, NarTop10Accuracy=0.6957, over 7266.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6481, over 5960.42 frames. ], batch size: 31, lr: 2.66e-03 +2024-08-06 13:59:08,259 INFO [trainer.py:765] (0/8) Epoch 32, batch 1300, train_loss[loss=3.087, NarTop10Accuracy=0.6956, over 4302.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6465, over 6023.63 frames. ], batch size: 5, lr: 2.66e-03 +2024-08-06 13:59:42,265 INFO [trainer.py:765] (0/8) Epoch 32, batch 1400, train_loss[loss=3.329, NarTop10Accuracy=0.6571, over 6053.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6465, over 6029.44 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,976 INFO [trainer.py:765] (0/8) Epoch 32, batch 1500, train_loss[loss=3.699, NarTop10Accuracy=0.5811, over 6483.00 frames. 
], tot_loss[loss=3.353, NarTop10Accuracy=0.6469, over 5960.90 frames. ], batch size: 48, lr: 2.66e-03 +2024-08-06 14:00:40,824 INFO [trainer.py:765] (0/8) Epoch 32, batch 1600, train_loss[loss=3.178, NarTop10Accuracy=0.673, over 7034.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6467, over 5928.65 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,534 INFO [trainer.py:765] (0/8) Epoch 32, batch 1700, train_loss[loss=3.369, NarTop10Accuracy=0.6484, over 6240.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6449, over 5931.02 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 14:01:34,089 INFO [trainer.py:765] (0/8) Epoch 32, batch 1800, train_loss[loss=3.218, NarTop10Accuracy=0.6746, over 7151.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6447, over 5982.39 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (0/8) Epoch 32, batch 1900, train_loss[loss=3.427, NarTop10Accuracy=0.6275, over 6209.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6429, over 6032.82 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:02:20,590 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-82000.pt +2024-08-06 14:02:24,194 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (0/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,383 INFO [trainer.py:765] (0/8) Epoch 32, batch 2000, train_loss[loss=3.504, NarTop10Accuracy=0.6151, over 6045.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6438, over 6019.26 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 14:03:01,698 INFO [trainer.py:765] (0/8) Epoch 32, batch 2100, train_loss[loss=3.239, NarTop10Accuracy=0.6566, over 4710.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6439, over 5999.59 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 14:03:27,177 INFO [trainer.py:765] (0/8) Epoch 32, batch 2200, train_loss[loss=3.622, NarTop10Accuracy=0.6013, over 7088.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 6039.76 frames. ], batch size: 30, lr: 2.64e-03 +2024-08-06 14:03:52,585 INFO [trainer.py:765] (0/8) Epoch 32, batch 2300, train_loss[loss=3.712, NarTop10Accuracy=0.5736, over 5855.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6408, over 6070.75 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (0/8) Epoch 32, batch 2400, train_loss[loss=3.601, NarTop10Accuracy=0.6044, over 6410.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6398, over 5886.58 frames. ], batch size: 48, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (0/8) Epoch 32, batch 2500, train_loss[loss=3.193, NarTop10Accuracy=0.6809, over 5074.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6458, over 5552.01 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:01,806 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 14:05:01,809 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-32.pt +2024-08-06 14:06:02,906 INFO [trainer.py:765] (0/8) Epoch 33, batch 100, train_loss[loss=3.626, NarTop10Accuracy=0.5961, over 7448.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6528, over 2369.19 frames. 
], batch size: 31, lr: 2.60e-03 +2024-08-06 14:06:36,080 INFO [trainer.py:765] (0/8) Epoch 33, batch 200, train_loss[loss=3.403, NarTop10Accuracy=0.6391, over 6779.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6561, over 3860.49 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,147 INFO [trainer.py:765] (0/8) Epoch 33, batch 300, train_loss[loss=3.281, NarTop10Accuracy=0.668, over 7297.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6565, over 4674.55 frames. ], batch size: 22, lr: 2.59e-03 +2024-08-06 14:07:44,121 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-83000.pt +2024-08-06 14:07:48,256 INFO [trainer.py:765] (0/8) Epoch 33, batch 400, train_loss[loss=3.22, NarTop10Accuracy=0.6712, over 5042.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6558, over 5132.82 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (0/8) Epoch 33, batch 500, train_loss[loss=3.143, NarTop10Accuracy=0.6867, over 6098.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6536, over 5404.05 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,793 INFO [trainer.py:765] (0/8) Epoch 33, batch 600, train_loss[loss=3.043, NarTop10Accuracy=0.7067, over 5739.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6528, over 5676.62 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,926 INFO [trainer.py:765] (0/8) Epoch 33, batch 700, train_loss[loss=3.088, NarTop10Accuracy=0.6963, over 5041.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 5735.12 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,597 INFO [trainer.py:765] (0/8) Epoch 33, batch 800, train_loss[loss=2.994, NarTop10Accuracy=0.6979, over 5127.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6502, over 5793.88 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,387 INFO [trainer.py:765] (0/8) Epoch 33, batch 900, train_loss[loss=3.319, NarTop10Accuracy=0.6511, over 6271.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 5799.96 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,070 INFO [trainer.py:765] (0/8) Epoch 33, batch 1000, train_loss[loss=3.276, NarTop10Accuracy=0.6493, over 6198.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6482, over 5909.75 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,302 INFO [trainer.py:765] (0/8) Epoch 33, batch 1100, train_loss[loss=3.55, NarTop10Accuracy=0.5997, over 6797.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6466, over 5934.00 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (0/8) Epoch 33, batch 1200, train_loss[loss=3.34, NarTop10Accuracy=0.6361, over 7187.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6454, over 5933.53 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 14:12:57,630 INFO [trainer.py:765] (0/8) Epoch 33, batch 1300, train_loss[loss=3.52, NarTop10Accuracy=0.6062, over 5136.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6474, over 6012.87 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-84000.pt +2024-08-06 14:13:34,965 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (0/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. 
+2024-08-06 14:13:41,687 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,803 INFO [trainer.py:765] (0/8) Epoch 33, batch 1400, train_loss[loss=3.294, NarTop10Accuracy=0.6611, over 6135.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6496, over 6023.23 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,246 INFO [trainer.py:765] (0/8) Epoch 33, batch 1500, train_loss[loss=3.43, NarTop10Accuracy=0.6284, over 6351.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6466, over 5985.36 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (0/8) Epoch 33, batch 1600, train_loss[loss=3.319, NarTop10Accuracy=0.6595, over 7159.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6449, over 5958.13 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,858 INFO [trainer.py:765] (0/8) Epoch 33, batch 1700, train_loss[loss=3.577, NarTop10Accuracy=0.6035, over 6305.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6442, over 5949.19 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,589 INFO [trainer.py:765] (0/8) Epoch 33, batch 1800, train_loss[loss=3.473, NarTop10Accuracy=0.6276, over 7164.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6472, over 6007.49 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,214 INFO [trainer.py:765] (0/8) Epoch 33, batch 1900, train_loss[loss=3.472, NarTop10Accuracy=0.6251, over 5754.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 6052.53 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:16:24,895 INFO [trainer.py:765] (0/8) Epoch 33, batch 2000, train_loss[loss=3.435, NarTop10Accuracy=0.6269, over 6121.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6467, over 6027.41 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:50,350 INFO [trainer.py:765] (0/8) Epoch 33, batch 2100, train_loss[loss=3.532, NarTop10Accuracy=0.6058, over 4666.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6453, over 6006.51 frames. ], batch size: 5, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (0/8) Epoch 33, batch 2200, train_loss[loss=3.533, NarTop10Accuracy=0.6131, over 7126.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6466, over 6042.20 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,309 INFO [trainer.py:765] (0/8) Epoch 33, batch 2300, train_loss[loss=3.456, NarTop10Accuracy=0.6391, over 5716.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 6087.36 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:05,030 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-85000.pt +2024-08-06 14:18:10,143 INFO [trainer.py:765] (0/8) Epoch 33, batch 2400, train_loss[loss=3.697, NarTop10Accuracy=0.5805, over 6404.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6416, over 5897.97 frames. ], batch size: 48, lr: 2.56e-03 +2024-08-06 14:18:33,707 INFO [trainer.py:765] (0/8) Epoch 33, batch 2500, train_loss[loss=3.363, NarTop10Accuracy=0.6418, over 5130.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6479, over 5540.90 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,675 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
+2024-08-06 14:18:54,678 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-33.pt +2024-08-06 14:19:51,933 INFO [trainer.py:765] (0/8) Epoch 34, batch 100, train_loss[loss=3.141, NarTop10Accuracy=0.6956, over 7159.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.661, over 2369.73 frames. ], batch size: 30, lr: 2.52e-03 +2024-08-06 14:20:24,373 INFO [trainer.py:765] (0/8) Epoch 34, batch 200, train_loss[loss=3.368, NarTop10Accuracy=0.6447, over 6997.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.663, over 3867.27 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,842 INFO [trainer.py:765] (0/8) Epoch 34, batch 300, train_loss[loss=3.093, NarTop10Accuracy=0.6944, over 7278.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.66, over 4682.88 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,450 INFO [trainer.py:765] (0/8) Epoch 34, batch 400, train_loss[loss=3.134, NarTop10Accuracy=0.6856, over 5233.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6581, over 5123.71 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,876 INFO [trainer.py:765] (0/8) Epoch 34, batch 500, train_loss[loss=3.151, NarTop10Accuracy=0.6834, over 6073.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6556, over 5393.84 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,827 INFO [trainer.py:765] (0/8) Epoch 34, batch 600, train_loss[loss=3.245, NarTop10Accuracy=0.6669, over 5829.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6559, over 5659.38 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (0/8) Epoch 34, batch 700, train_loss[loss=3.149, NarTop10Accuracy=0.6929, over 4919.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6547, over 5738.75 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (0/8) Epoch 34, batch 800, train_loss[loss=3.562, NarTop10Accuracy=0.6108, over 4824.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6506, over 5800.48 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,719 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-86000.pt +2024-08-06 14:23:54,158 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (0/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (0/8) Epoch 34, batch 900, train_loss[loss=3.263, NarTop10Accuracy=0.6598, over 6673.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6505, over 5833.17 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (0/8) Epoch 34, batch 1000, train_loss[loss=3.264, NarTop10Accuracy=0.6657, over 6232.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6498, over 5925.37 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (0/8) Epoch 34, batch 1100, train_loss[loss=3.548, NarTop10Accuracy=0.6182, over 6921.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.648, over 5956.47 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (0/8) Epoch 34, batch 1200, train_loss[loss=3.357, NarTop10Accuracy=0.6568, over 7086.00 frames. 
], tot_loss[loss=3.341, NarTop10Accuracy=0.6495, over 5955.85 frames. ], batch size: 30, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (0/8) Epoch 34, batch 1300, train_loss[loss=3.637, NarTop10Accuracy=0.5877, over 5080.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6491, over 6024.94 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (0/8) Epoch 34, batch 1400, train_loss[loss=3.118, NarTop10Accuracy=0.7027, over 6224.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6479, over 6048.03 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (0/8) Epoch 34, batch 1500, train_loss[loss=3.751, NarTop10Accuracy=0.567, over 6755.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6495, over 5981.86 frames. ], batch size: 49, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (0/8) Epoch 34, batch 1600, train_loss[loss=3.378, NarTop10Accuracy=0.6499, over 7198.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.646, over 5966.01 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,384 INFO [trainer.py:765] (0/8) Epoch 34, batch 1700, train_loss[loss=3.394, NarTop10Accuracy=0.6391, over 6084.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.643, over 5937.25 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,009 INFO [trainer.py:765] (0/8) Epoch 34, batch 1800, train_loss[loss=3.61, NarTop10Accuracy=0.5965, over 7124.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6446, over 6010.67 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:17,946 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-87000.pt +2024-08-06 14:29:43,752 INFO [trainer.py:765] (0/8) Epoch 34, batch 1900, train_loss[loss=3.684, NarTop10Accuracy=0.5868, over 5995.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6424, over 6039.93 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 14:30:09,515 INFO [trainer.py:765] (0/8) Epoch 34, batch 2000, train_loss[loss=3.587, NarTop10Accuracy=0.6015, over 5902.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 6010.19 frames. ], batch size: 49, lr: 2.49e-03 +2024-08-06 14:30:35,015 INFO [trainer.py:765] (0/8) Epoch 34, batch 2100, train_loss[loss=3.2, NarTop10Accuracy=0.6676, over 3914.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6451, over 5994.64 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,510 INFO [trainer.py:765] (0/8) Epoch 34, batch 2200, train_loss[loss=3.368, NarTop10Accuracy=0.6442, over 7212.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.645, over 6043.81 frames. ], batch size: 30, lr: 2.49e-03 +2024-08-06 14:31:25,979 INFO [trainer.py:765] (0/8) Epoch 34, batch 2300, train_loss[loss=3.348, NarTop10Accuracy=0.6416, over 5747.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 6072.35 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (0/8) Epoch 34, batch 2400, train_loss[loss=3.269, NarTop10Accuracy=0.6634, over 5170.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6425, over 5908.34 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (0/8) Epoch 34, batch 2500, train_loss[loss=3.06, NarTop10Accuracy=0.706, over 5252.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6462, over 5555.62 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,409 INFO [trainer.py:650] (0/8) Reaches end of dataloader. 
+2024-08-06 14:32:35,413 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-34.pt +2024-08-06 14:33:26,337 INFO [trainer.py:765] (0/8) Epoch 35, batch 100, train_loss[loss=3.247, NarTop10Accuracy=0.6673, over 7580.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6588, over 2372.96 frames. ], batch size: 31, lr: 2.44e-03 +2024-08-06 14:34:03,582 INFO [trainer.py:765] (0/8) Epoch 35, batch 200, train_loss[loss=3.417, NarTop10Accuracy=0.6387, over 6886.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6597, over 3873.10 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,185 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-88000.pt +2024-08-06 14:34:16,712 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (0/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,664 INFO [trainer.py:765] (0/8) Epoch 35, batch 300, train_loss[loss=3.678, NarTop10Accuracy=0.5779, over 7099.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6592, over 4681.74 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,542 INFO [trainer.py:765] (0/8) Epoch 35, batch 400, train_loss[loss=3.161, NarTop10Accuracy=0.6888, over 5081.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6576, over 5138.55 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,187 INFO [trainer.py:765] (0/8) Epoch 35, batch 500, train_loss[loss=3.436, NarTop10Accuracy=0.6197, over 6138.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6578, over 5406.85 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,747 INFO [trainer.py:765] (0/8) Epoch 35, batch 600, train_loss[loss=3.254, NarTop10Accuracy=0.6524, over 5716.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6562, over 5676.16 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,825 INFO [trainer.py:765] (0/8) Epoch 35, batch 700, train_loss[loss=3.368, NarTop10Accuracy=0.6467, over 5114.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6541, over 5747.68 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,768 INFO [trainer.py:765] (0/8) Epoch 35, batch 800, train_loss[loss=3.088, NarTop10Accuracy=0.6818, over 4217.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6542, over 5808.12 frames. ], batch size: 5, lr: 2.43e-03 +2024-08-06 14:38:03,303 INFO [trainer.py:765] (0/8) Epoch 35, batch 900, train_loss[loss=3.257, NarTop10Accuracy=0.6751, over 6320.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6548, over 5836.35 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:38:43,708 INFO [trainer.py:765] (0/8) Epoch 35, batch 1000, train_loss[loss=3.588, NarTop10Accuracy=0.6034, over 6209.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6537, over 5922.27 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,566 INFO [trainer.py:765] (0/8) Epoch 35, batch 1100, train_loss[loss=3.538, NarTop10Accuracy=0.6101, over 6746.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6528, over 5962.99 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,837 INFO [trainer.py:765] (0/8) Epoch 35, batch 1200, train_loss[loss=3.217, NarTop10Accuracy=0.6767, over 7632.00 frames. 
], tot_loss[loss=3.338, NarTop10Accuracy=0.6505, over 5954.05 frames. ], batch size: 32, lr: 2.43e-03 +2024-08-06 14:40:05,950 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-89000.pt +2024-08-06 14:40:33,952 INFO [trainer.py:765] (0/8) Epoch 35, batch 1300, train_loss[loss=3.274, NarTop10Accuracy=0.6665, over 5023.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6515, over 6017.40 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,183 INFO [trainer.py:765] (0/8) Epoch 35, batch 1400, train_loss[loss=3.458, NarTop10Accuracy=0.6212, over 6039.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6477, over 6035.73 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,823 INFO [trainer.py:765] (0/8) Epoch 35, batch 1500, train_loss[loss=3.465, NarTop10Accuracy=0.6258, over 6467.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6489, over 5961.48 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (0/8) Epoch 35, batch 1600, train_loss[loss=3.509, NarTop10Accuracy=0.6057, over 7069.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 5955.26 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,466 INFO [trainer.py:765] (0/8) Epoch 35, batch 1700, train_loss[loss=3.27, NarTop10Accuracy=0.6548, over 6337.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6485, over 5938.90 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (0/8) Epoch 35, batch 1800, train_loss[loss=3.259, NarTop10Accuracy=0.6655, over 6972.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6469, over 6019.72 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,646 INFO [trainer.py:765] (0/8) Epoch 35, batch 1900, train_loss[loss=3.42, NarTop10Accuracy=0.6435, over 5934.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6466, over 6043.15 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:43:47,367 INFO [trainer.py:765] (0/8) Epoch 35, batch 2000, train_loss[loss=3.449, NarTop10Accuracy=0.6301, over 5955.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6475, over 6001.78 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:44:12,856 INFO [trainer.py:765] (0/8) Epoch 35, batch 2100, train_loss[loss=3.28, NarTop10Accuracy=0.6622, over 4047.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6473, over 6007.44 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,388 INFO [trainer.py:765] (0/8) Epoch 35, batch 2200, train_loss[loss=3.547, NarTop10Accuracy=0.6177, over 7525.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6475, over 6046.15 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 14:44:47,199 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-90000.pt +2024-08-06 14:44:50,805 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (0/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,099 INFO [trainer.py:765] (0/8) Epoch 35, batch 2300, train_loss[loss=3.177, NarTop10Accuracy=0.6805, over 5874.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6463, over 6067.67 frames. 
], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,819 INFO [trainer.py:765] (0/8) Epoch 35, batch 2400, train_loss[loss=3.386, NarTop10Accuracy=0.6475, over 5869.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6452, over 5869.71 frames. ], batch size: 48, lr: 2.41e-03 +2024-08-06 14:46:02,146 INFO [trainer.py:765] (0/8) Epoch 35, batch 2500, train_loss[loss=3.265, NarTop10Accuracy=0.6726, over 5113.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.652, over 5540.34 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,241 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 14:46:23,243 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-35.pt +2024-08-06 14:47:25,441 INFO [trainer.py:765] (0/8) Epoch 36, batch 100, train_loss[loss=3.235, NarTop10Accuracy=0.6733, over 7215.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6565, over 2381.26 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (0/8) Epoch 36, batch 200, train_loss[loss=3.267, NarTop10Accuracy=0.6613, over 6901.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.66, over 3877.22 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (0/8) Epoch 36, batch 300, train_loss[loss=3.265, NarTop10Accuracy=0.6733, over 7260.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6605, over 4686.20 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (0/8) Epoch 36, batch 400, train_loss[loss=3.195, NarTop10Accuracy=0.6981, over 5138.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6607, over 5154.58 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (0/8) Epoch 36, batch 500, train_loss[loss=3.456, NarTop10Accuracy=0.6357, over 6170.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6613, over 5426.53 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (0/8) Epoch 36, batch 600, train_loss[loss=3.004, NarTop10Accuracy=0.7149, over 5816.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6569, over 5680.17 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:29,786 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-91000.pt +2024-08-06 14:50:46,514 INFO [trainer.py:765] (0/8) Epoch 36, batch 700, train_loss[loss=3.165, NarTop10Accuracy=0.6902, over 5038.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6557, over 5737.22 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (0/8) Epoch 36, batch 800, train_loss[loss=3.115, NarTop10Accuracy=0.6728, over 5128.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6547, over 5794.96 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (0/8) Epoch 36, batch 900, train_loss[loss=3.176, NarTop10Accuracy=0.6878, over 6397.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.654, over 5825.56 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (0/8) Epoch 36, batch 1000, train_loss[loss=3.113, NarTop10Accuracy=0.7074, over 6363.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6545, over 5915.45 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (0/8) Epoch 36, batch 1100, train_loss[loss=3.299, NarTop10Accuracy=0.6778, over 6814.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6501, over 5958.27 frames. 
], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (0/8) Epoch 36, batch 1200, train_loss[loss=3.277, NarTop10Accuracy=0.6586, over 7475.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6534, over 5961.39 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (0/8) Epoch 36, batch 1300, train_loss[loss=3.269, NarTop10Accuracy=0.659, over 5165.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6535, over 6024.05 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (0/8) Epoch 36, batch 1400, train_loss[loss=3.315, NarTop10Accuracy=0.66, over 6275.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6524, over 6046.84 frames. ], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (0/8) Epoch 36, batch 1500, train_loss[loss=3.572, NarTop10Accuracy=0.5966, over 5598.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6511, over 5979.96 frames. ], batch size: 49, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (0/8) Epoch 36, batch 1600, train_loss[loss=3.164, NarTop10Accuracy=0.6841, over 6984.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6509, over 5941.22 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,131 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-92000.pt +2024-08-06 14:56:07,767 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (0/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 14:56:15,104 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,177 INFO [trainer.py:765] (0/8) Epoch 36, batch 1700, train_loss[loss=3.293, NarTop10Accuracy=0.6632, over 6643.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6502, over 5938.46 frames. ], batch size: 14, lr: 2.35e-03 +2024-08-06 14:56:53,758 INFO [trainer.py:765] (0/8) Epoch 36, batch 1800, train_loss[loss=3.47, NarTop10Accuracy=0.6207, over 7338.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6501, over 6009.30 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,335 INFO [trainer.py:765] (0/8) Epoch 36, batch 1900, train_loss[loss=3.303, NarTop10Accuracy=0.6553, over 6116.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6488, over 6057.87 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 14:57:46,056 INFO [trainer.py:765] (0/8) Epoch 36, batch 2000, train_loss[loss=3.654, NarTop10Accuracy=0.5832, over 6397.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6474, over 6026.92 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (0/8) Epoch 36, batch 2100, train_loss[loss=3.042, NarTop10Accuracy=0.7175, over 3909.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6482, over 6015.42 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 14:58:36,832 INFO [trainer.py:765] (0/8) Epoch 36, batch 2200, train_loss[loss=3.598, NarTop10Accuracy=0.5882, over 7253.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6486, over 6053.70 frames. ], batch size: 30, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (0/8) Epoch 36, batch 2300, train_loss[loss=3.093, NarTop10Accuracy=0.6936, over 5740.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.645, over 6070.57 frames. 
], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (0/8) Epoch 36, batch 2400, train_loss[loss=3.5, NarTop10Accuracy=0.6255, over 5241.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6436, over 5894.42 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (0/8) Epoch 36, batch 2500, train_loss[loss=3.676, NarTop10Accuracy=0.5948, over 4885.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6473, over 5532.38 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,193 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:00:11,197 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-36.pt +2024-08-06 15:00:58,572 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-93000.pt +2024-08-06 15:01:14,218 INFO [trainer.py:765] (0/8) Epoch 37, batch 100, train_loss[loss=3.191, NarTop10Accuracy=0.6836, over 7413.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6618, over 2380.77 frames. ], batch size: 31, lr: 2.31e-03 +2024-08-06 15:01:44,098 INFO [trainer.py:765] (0/8) Epoch 37, batch 200, train_loss[loss=3.085, NarTop10Accuracy=0.6983, over 6909.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6638, over 3869.96 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,383 INFO [trainer.py:765] (0/8) Epoch 37, batch 300, train_loss[loss=3.18, NarTop10Accuracy=0.6889, over 7323.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6635, over 4670.79 frames. ], batch size: 23, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (0/8) Epoch 37, batch 400, train_loss[loss=3.578, NarTop10Accuracy=0.6072, over 5223.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6611, over 5122.61 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (0/8) Epoch 37, batch 500, train_loss[loss=2.978, NarTop10Accuracy=0.7217, over 6189.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6583, over 5401.54 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,033 INFO [trainer.py:765] (0/8) Epoch 37, batch 600, train_loss[loss=3.113, NarTop10Accuracy=0.6895, over 5803.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6582, over 5673.00 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,248 INFO [trainer.py:765] (0/8) Epoch 37, batch 700, train_loss[loss=3.308, NarTop10Accuracy=0.6515, over 5083.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6562, over 5741.82 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (0/8) Epoch 37, batch 800, train_loss[loss=3.201, NarTop10Accuracy=0.668, over 5136.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6529, over 5797.89 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (0/8) Epoch 37, batch 900, train_loss[loss=2.999, NarTop10Accuracy=0.7198, over 6166.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.654, over 5819.68 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (0/8) Epoch 37, batch 1000, train_loss[loss=3.163, NarTop10Accuracy=0.6811, over 6164.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6524, over 5920.25 frames. 
], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,490 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-94000.pt +2024-08-06 15:06:46,411 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (0/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (0/8) Epoch 37, batch 1100, train_loss[loss=3.572, NarTop10Accuracy=0.6016, over 6759.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6533, over 5954.03 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (0/8) Epoch 37, batch 1200, train_loss[loss=3.289, NarTop10Accuracy=0.6681, over 7305.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6544, over 5957.21 frames. ], batch size: 30, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (0/8) Epoch 37, batch 1300, train_loss[loss=3.451, NarTop10Accuracy=0.6232, over 4967.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6533, over 6022.76 frames. ], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (0/8) Epoch 37, batch 1400, train_loss[loss=2.981, NarTop10Accuracy=0.7108, over 6166.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6506, over 6042.73 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (0/8) Epoch 37, batch 1500, train_loss[loss=3.622, NarTop10Accuracy=0.5939, over 6240.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6521, over 5967.80 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (0/8) Epoch 37, batch 1600, train_loss[loss=3.482, NarTop10Accuracy=0.6258, over 7033.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6513, over 5946.62 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (0/8) Epoch 37, batch 1700, train_loss[loss=3.145, NarTop10Accuracy=0.6867, over 6357.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.651, over 5952.94 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (0/8) Epoch 37, batch 1800, train_loss[loss=3.495, NarTop10Accuracy=0.6199, over 7340.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6525, over 6022.91 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (0/8) Epoch 37, batch 1900, train_loss[loss=3.621, NarTop10Accuracy=0.5955, over 6243.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6479, over 6048.41 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (0/8) Epoch 37, batch 2000, train_loss[loss=3.532, NarTop10Accuracy=0.6111, over 5832.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.648, over 6016.96 frames. ], batch size: 48, lr: 2.29e-03 +2024-08-06 15:11:48,560 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-95000.pt +2024-08-06 15:11:58,797 INFO [trainer.py:765] (0/8) Epoch 37, batch 2100, train_loss[loss=3.037, NarTop10Accuracy=0.7127, over 3893.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6495, over 5994.43 frames. 
], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,311 INFO [trainer.py:765] (0/8) Epoch 37, batch 2200, train_loss[loss=3.317, NarTop10Accuracy=0.6543, over 7154.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 6028.77 frames. ], batch size: 30, lr: 2.28e-03 +2024-08-06 15:12:49,786 INFO [trainer.py:765] (0/8) Epoch 37, batch 2300, train_loss[loss=3.264, NarTop10Accuracy=0.6618, over 5755.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.648, over 6050.91 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (0/8) Epoch 37, batch 2400, train_loss[loss=3.186, NarTop10Accuracy=0.6819, over 5228.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6476, over 5875.68 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (0/8) Epoch 37, batch 2500, train_loss[loss=3.831, NarTop10Accuracy=0.5419, over 5144.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6509, over 5518.91 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,219 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:13:59,221 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-37.pt +2024-08-06 15:14:50,846 INFO [trainer.py:765] (0/8) Epoch 38, batch 100, train_loss[loss=3.537, NarTop10Accuracy=0.6095, over 7513.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6562, over 2367.99 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 15:15:27,289 INFO [trainer.py:765] (0/8) Epoch 38, batch 200, train_loss[loss=3.172, NarTop10Accuracy=0.6884, over 6845.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6638, over 3878.73 frames. ], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,281 INFO [trainer.py:765] (0/8) Epoch 38, batch 300, train_loss[loss=3.393, NarTop10Accuracy=0.6398, over 7292.00 frames. ], tot_loss[loss=3.274, NarTop10Accuracy=0.6646, over 4679.86 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,595 INFO [trainer.py:765] (0/8) Epoch 38, batch 400, train_loss[loss=3.252, NarTop10Accuracy=0.6611, over 5144.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6635, over 5125.67 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,257 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-96000.pt +2024-08-06 15:17:07,576 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (0/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,479 INFO [trainer.py:765] (0/8) Epoch 38, batch 500, train_loss[loss=3.346, NarTop10Accuracy=0.6522, over 6026.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6651, over 5382.60 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (0/8) Epoch 38, batch 600, train_loss[loss=3.278, NarTop10Accuracy=0.67, over 5783.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.661, over 5658.55 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,465 INFO [trainer.py:765] (0/8) Epoch 38, batch 700, train_loss[loss=3.241, NarTop10Accuracy=0.6625, over 4988.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6572, over 5738.38 frames. 
], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (0/8) Epoch 38, batch 800, train_loss[loss=3.119, NarTop10Accuracy=0.6906, over 5048.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6566, over 5792.08 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,539 INFO [trainer.py:765] (0/8) Epoch 38, batch 900, train_loss[loss=3.547, NarTop10Accuracy=0.6045, over 6652.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6547, over 5807.01 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (0/8) Epoch 38, batch 1000, train_loss[loss=3.534, NarTop10Accuracy=0.608, over 6234.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6535, over 5900.90 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,345 INFO [trainer.py:765] (0/8) Epoch 38, batch 1100, train_loss[loss=3.34, NarTop10Accuracy=0.6495, over 7053.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6509, over 5937.27 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (0/8) Epoch 38, batch 1200, train_loss[loss=3.389, NarTop10Accuracy=0.6383, over 7150.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6516, over 5943.04 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (0/8) Epoch 38, batch 1300, train_loss[loss=3.143, NarTop10Accuracy=0.6895, over 5109.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6543, over 6012.35 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,467 INFO [trainer.py:765] (0/8) Epoch 38, batch 1400, train_loss[loss=3.028, NarTop10Accuracy=0.7035, over 5981.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6516, over 6046.33 frames. ], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:01,195 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-97000.pt +2024-08-06 15:23:06,615 INFO [trainer.py:765] (0/8) Epoch 38, batch 1500, train_loss[loss=3.459, NarTop10Accuracy=0.6308, over 6065.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6495, over 5982.05 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (0/8) Epoch 38, batch 1600, train_loss[loss=3.541, NarTop10Accuracy=0.6073, over 6960.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 5943.97 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (0/8) Epoch 38, batch 1700, train_loss[loss=3.147, NarTop10Accuracy=0.6991, over 6240.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 5936.85 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,064 INFO [trainer.py:765] (0/8) Epoch 38, batch 1800, train_loss[loss=3.252, NarTop10Accuracy=0.6728, over 7058.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6465, over 5989.95 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,672 INFO [trainer.py:765] (0/8) Epoch 38, batch 1900, train_loss[loss=3.386, NarTop10Accuracy=0.6449, over 6104.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6466, over 6022.17 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (0/8) Epoch 38, batch 2000, train_loss[loss=3.451, NarTop10Accuracy=0.6309, over 6553.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6475, over 6014.71 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (0/8) Epoch 38, batch 2100, train_loss[loss=3.304, NarTop10Accuracy=0.6584, over 3927.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6492, over 6005.97 frames. 
], batch size: 4, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (0/8) Epoch 38, batch 2200, train_loss[loss=3.729, NarTop10Accuracy=0.5734, over 7183.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 6037.60 frames. ], batch size: 31, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (0/8) Epoch 38, batch 2300, train_loss[loss=3.207, NarTop10Accuracy=0.6786, over 5770.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6459, over 6061.97 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (0/8) Epoch 38, batch 2400, train_loss[loss=3.59, NarTop10Accuracy=0.6035, over 6083.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6433, over 5883.32 frames. ], batch size: 49, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-98000.pt +2024-08-06 15:27:26,744 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:27:33,589 INFO [trainer.py:811] (0/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 15:27:34,075 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,514 INFO [trainer.py:765] (0/8) Epoch 38, batch 2500, train_loss[loss=2.998, NarTop10Accuracy=0.7071, over 5095.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.651, over 5531.60 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,855 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:27:56,860 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-38.pt +2024-08-06 15:28:51,227 INFO [trainer.py:765] (0/8) Epoch 39, batch 100, train_loss[loss=3.212, NarTop10Accuracy=0.6722, over 7141.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6666, over 2371.47 frames. ], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (0/8) Epoch 39, batch 200, train_loss[loss=3.546, NarTop10Accuracy=0.6116, over 6856.00 frames. ], tot_loss[loss=3.258, NarTop10Accuracy=0.667, over 3872.29 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (0/8) Epoch 39, batch 300, train_loss[loss=3.287, NarTop10Accuracy=0.659, over 7201.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6633, over 4681.82 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (0/8) Epoch 39, batch 400, train_loss[loss=2.964, NarTop10Accuracy=0.7163, over 5237.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6621, over 5126.41 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (0/8) Epoch 39, batch 500, train_loss[loss=3.241, NarTop10Accuracy=0.6684, over 6166.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.66, over 5399.55 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (0/8) Epoch 39, batch 600, train_loss[loss=3.156, NarTop10Accuracy=0.688, over 5830.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6602, over 5680.82 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,452 INFO [trainer.py:765] (0/8) Epoch 39, batch 700, train_loss[loss=3.135, NarTop10Accuracy=0.6944, over 5149.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6577, over 5754.13 frames. 
], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,166 INFO [trainer.py:765] (0/8) Epoch 39, batch 800, train_loss[loss=3.377, NarTop10Accuracy=0.6433, over 4989.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6573, over 5787.39 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (0/8) Epoch 39, batch 900, train_loss[loss=3.077, NarTop10Accuracy=0.7075, over 6330.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6564, over 5812.17 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:33:28,983 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-99000.pt +2024-08-06 15:34:02,655 INFO [trainer.py:765] (0/8) Epoch 39, batch 1000, train_loss[loss=3.087, NarTop10Accuracy=0.7108, over 6277.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6562, over 5915.08 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:33,095 INFO [trainer.py:765] (0/8) Epoch 39, batch 1100, train_loss[loss=3.179, NarTop10Accuracy=0.6862, over 6840.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6552, over 5944.85 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,245 INFO [trainer.py:765] (0/8) Epoch 39, batch 1200, train_loss[loss=3.181, NarTop10Accuracy=0.6865, over 6915.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6545, over 5941.45 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (0/8) Epoch 39, batch 1300, train_loss[loss=3.535, NarTop10Accuracy=0.609, over 4289.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6546, over 6002.42 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (0/8) Epoch 39, batch 1400, train_loss[loss=3.11, NarTop10Accuracy=0.7008, over 6070.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6545, over 6046.43 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (0/8) Epoch 39, batch 1500, train_loss[loss=3.363, NarTop10Accuracy=0.65, over 6300.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6523, over 5982.05 frames. ], batch size: 48, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (0/8) Epoch 39, batch 1600, train_loss[loss=3.397, NarTop10Accuracy=0.641, over 7168.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6493, over 5969.16 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (0/8) Epoch 39, batch 1700, train_loss[loss=3.227, NarTop10Accuracy=0.672, over 6578.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 5937.14 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 15:38:08,510 INFO [trainer.py:765] (0/8) Epoch 39, batch 1800, train_loss[loss=3.322, NarTop10Accuracy=0.65, over 7336.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6504, over 5999.91 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (0/8) Epoch 39, batch 1900, train_loss[loss=3.46, NarTop10Accuracy=0.6231, over 6186.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 6025.58 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 15:38:37,990 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-100000.pt +2024-08-06 15:38:41,561 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (0/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. 
+2024-08-06 15:38:48,262 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,226 INFO [trainer.py:765] (0/8) Epoch 39, batch 2000, train_loss[loss=3.324, NarTop10Accuracy=0.6478, over 6129.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6511, over 5992.47 frames. ], batch size: 48, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (0/8) Epoch 39, batch 2100, train_loss[loss=3.483, NarTop10Accuracy=0.6307, over 4800.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6487, over 5986.94 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (0/8) Epoch 39, batch 2200, train_loss[loss=3.642, NarTop10Accuracy=0.5913, over 7143.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6488, over 6030.30 frames. ], batch size: 30, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (0/8) Epoch 39, batch 2300, train_loss[loss=3.009, NarTop10Accuracy=0.7179, over 5680.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6485, over 6068.28 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (0/8) Epoch 39, batch 2400, train_loss[loss=3.368, NarTop10Accuracy=0.641, over 5974.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6467, over 5884.07 frames. ], batch size: 49, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (0/8) Epoch 39, batch 2500, train_loss[loss=3.513, NarTop10Accuracy=0.6232, over 5076.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6526, over 5533.16 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:37,020 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:41:37,025 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-39.pt +2024-08-06 15:42:35,254 INFO [trainer.py:765] (0/8) Epoch 40, batch 100, train_loss[loss=3.643, NarTop10Accuracy=0.5898, over 7318.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6553, over 2361.92 frames. ], batch size: 31, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (0/8) Epoch 40, batch 200, train_loss[loss=3.438, NarTop10Accuracy=0.6308, over 6828.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6637, over 3853.89 frames. ], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (0/8) Epoch 40, batch 300, train_loss[loss=3.352, NarTop10Accuracy=0.6378, over 7350.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6623, over 4674.59 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:43:52,264 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-101000.pt +2024-08-06 15:44:18,202 INFO [trainer.py:765] (0/8) Epoch 40, batch 400, train_loss[loss=3.053, NarTop10Accuracy=0.7027, over 5066.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6638, over 5143.89 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (0/8) Epoch 40, batch 500, train_loss[loss=3.311, NarTop10Accuracy=0.6664, over 6259.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6626, over 5433.20 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (0/8) Epoch 40, batch 600, train_loss[loss=3.408, NarTop10Accuracy=0.6385, over 5860.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6593, over 5692.55 frames. 
], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (0/8) Epoch 40, batch 700, train_loss[loss=3.383, NarTop10Accuracy=0.6428, over 4959.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6561, over 5750.74 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (0/8) Epoch 40, batch 800, train_loss[loss=3.302, NarTop10Accuracy=0.6584, over 5079.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6569, over 5816.45 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (0/8) Epoch 40, batch 900, train_loss[loss=3.195, NarTop10Accuracy=0.6842, over 6580.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6569, over 5829.43 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (0/8) Epoch 40, batch 1000, train_loss[loss=3.43, NarTop10Accuracy=0.6256, over 6640.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6554, over 5917.07 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:48:18,709 INFO [trainer.py:765] (0/8) Epoch 40, batch 1100, train_loss[loss=3.52, NarTop10Accuracy=0.6255, over 6985.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6543, over 5954.98 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (0/8) Epoch 40, batch 1200, train_loss[loss=3.196, NarTop10Accuracy=0.6816, over 7014.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6546, over 5947.81 frames. ], batch size: 30, lr: 2.12e-03 +2024-08-06 15:49:29,782 INFO [trainer.py:765] (0/8) Epoch 40, batch 1300, train_loss[loss=3.593, NarTop10Accuracy=0.6138, over 5014.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6543, over 6026.96 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,244 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-102000.pt +2024-08-06 15:49:41,956 INFO [trainer.py:803] (0/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (0/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (0/8) Maximum memory allocated so far is 30689MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (0/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (0/8) Epoch 40, batch 1400, train_loss[loss=3.37, NarTop10Accuracy=0.6521, over 6206.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.654, over 6019.80 frames. ], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (0/8) Epoch 40, batch 1500, train_loss[loss=3.537, NarTop10Accuracy=0.6141, over 5496.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6527, over 5956.73 frames. ], batch size: 49, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (0/8) Epoch 40, batch 1600, train_loss[loss=3.169, NarTop10Accuracy=0.6865, over 7447.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6542, over 5946.75 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,570 INFO [trainer.py:765] (0/8) Epoch 40, batch 1700, train_loss[loss=3.442, NarTop10Accuracy=0.6421, over 6677.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6514, over 5938.77 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (0/8) Epoch 40, batch 1800, train_loss[loss=3.418, NarTop10Accuracy=0.6345, over 7182.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6508, over 6006.32 frames. 
], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (0/8) Epoch 40, batch 1900, train_loss[loss=3.399, NarTop10Accuracy=0.6367, over 5951.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6492, over 6044.05 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (0/8) Epoch 40, batch 2000, train_loss[loss=3.382, NarTop10Accuracy=0.6457, over 5962.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6484, over 6012.31 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (0/8) Epoch 40, batch 2100, train_loss[loss=3.121, NarTop10Accuracy=0.6986, over 4917.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6489, over 5993.42 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 15:53:50,418 INFO [trainer.py:765] (0/8) Epoch 40, batch 2200, train_loss[loss=3.38, NarTop10Accuracy=0.647, over 7279.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6498, over 6034.94 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (0/8) Epoch 40, batch 2300, train_loss[loss=3.344, NarTop10Accuracy=0.6619, over 5761.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6505, over 6069.80 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:23,200 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/checkpoint-103000.pt +2024-08-06 15:54:43,787 INFO [trainer.py:765] (0/8) Epoch 40, batch 2400, train_loss[loss=3.786, NarTop10Accuracy=0.5617, over 5833.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6492, over 5907.90 frames. ], batch size: 51, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (0/8) Epoch 40, batch 2500, train_loss[loss=3.035, NarTop10Accuracy=0.7183, over 5102.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.656, over 5547.59 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,017 INFO [trainer.py:650] (0/8) Reaches end of dataloader. +2024-08-06 15:55:28,019 INFO [checkpoint.py:75] (0/8) Saving checkpoint to exp/valle/epoch-40.pt +2024-08-06 15:55:31,454 INFO [trainer.py:1069] (0/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-1 b/libritts/log/log-train-2024-08-06-06-41-41-1 new file mode 100644 index 0000000000000000000000000000000000000000..bec260c53d1982d8340e46f3913fb7c365214649 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-1 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,490 INFO [trainer.py:870] (1/8) Training started +2024-08-06 06:41:41,491 INFO [trainer.py:889] (1/8) Device: cuda:1 +2024-08-06 06:41:41,491 INFO [trainer.py:890] (1/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,491 INFO [trainer.py:892] (1/8) About to create model +2024-08-06 06:41:42,394 INFO [trainer.py:899] (1/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,395 INFO [checkpoint.py:112] (1/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,848 INFO [trainer.py:914] (1/8) Using DDP +2024-08-06 06:41:46,902 INFO [datamodule.py:427] (1/8) About to get train cuts +2024-08-06 06:41:46,905 INFO [datamodule.py:434] (1/8) About to get dev cuts +2024-08-06 06:41:46,907 INFO [datamodule.py:292] (1/8) Disable SpecAugment +2024-08-06 06:41:46,907 INFO [datamodule.py:294] (1/8) About to create train dataset +2024-08-06 06:41:46,908 INFO [datamodule.py:323] (1/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,544 INFO [datamodule.py:344] (1/8) About to create train dataloader +2024-08-06 06:41:47,544 INFO [datamodule.py:367] (1/8) 
About to create dev dataset +2024-08-06 06:41:47,885 INFO [datamodule.py:388] (1/8) About to create dev dataloader +2024-08-06 06:42:36,136 INFO [trainer.py:765] (1/8) Epoch 1, batch 100, train_loss[loss=95.48, NarTop10Accuracy=0.01071, over 7338.00 frames. ], tot_loss[loss=80.61, NarTop10Accuracy=0.0526, over 2353.95 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,818 INFO [trainer.py:765] (1/8) Epoch 1, batch 200, train_loss[loss=123.2, NarTop10Accuracy=0.01814, over 7100.00 frames. ], tot_loss[loss=99.44, NarTop10Accuracy=0.04509, over 3862.73 frames. ], batch size: 18, lr: 3.00e-02 +2024-08-06 06:43:33,849 INFO [trainer.py:765] (1/8) Epoch 1, batch 300, train_loss[loss=74.02, NarTop10Accuracy=0.02318, over 7021.00 frames. ], tot_loss[loss=87.04, NarTop10Accuracy=0.04609, over 4655.62 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,251 INFO [trainer.py:765] (1/8) Epoch 1, batch 400, train_loss[loss=32.65, NarTop10Accuracy=0.05181, over 5146.00 frames. ], tot_loss[loss=68.01, NarTop10Accuracy=0.05057, over 5100.67 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,445 INFO [trainer.py:765] (1/8) Epoch 1, batch 500, train_loss[loss=16.62, NarTop10Accuracy=0.02294, over 6211.00 frames. ], tot_loss[loss=48.55, NarTop10Accuracy=0.05707, over 5406.64 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,924 INFO [trainer.py:765] (1/8) Epoch 1, batch 600, train_loss[loss=5.973, NarTop10Accuracy=0.2194, over 5789.00 frames. ], tot_loss[loss=33.26, NarTop10Accuracy=0.06419, over 5671.07 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,481 INFO [trainer.py:765] (1/8) Epoch 1, batch 700, train_loss[loss=7.159, NarTop10Accuracy=0.1122, over 5112.00 frames. ], tot_loss[loss=23.51, NarTop10Accuracy=0.07151, over 5732.78 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,663 INFO [trainer.py:765] (1/8) Epoch 1, batch 800, train_loss[loss=6.634, NarTop10Accuracy=0.1097, over 4287.00 frames. ], tot_loss[loss=17.49, NarTop10Accuracy=0.08213, over 5799.88 frames. ], batch size: 5, lr: 2.98e-02 +2024-08-06 06:46:37,734 INFO [trainer.py:765] (1/8) Epoch 1, batch 900, train_loss[loss=5.655, NarTop10Accuracy=0.2295, over 6122.00 frames. ], tot_loss[loss=13, NarTop10Accuracy=0.1115, over 5824.88 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,909 INFO [trainer.py:765] (1/8) Epoch 1, batch 1000, train_loss[loss=5.909, NarTop10Accuracy=0.1985, over 6163.00 frames. ], tot_loss[loss=10.16, NarTop10Accuracy=0.1368, over 5918.80 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,141 INFO [trainer.py:765] (1/8) Epoch 1, batch 1100, train_loss[loss=5.399, NarTop10Accuracy=0.2207, over 6811.00 frames. ], tot_loss[loss=8.407, NarTop10Accuracy=0.1577, over 5955.83 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,710 INFO [trainer.py:765] (1/8) Epoch 1, batch 1200, train_loss[loss=6.288, NarTop10Accuracy=0.1443, over 7167.00 frames. ], tot_loss[loss=7.301, NarTop10Accuracy=0.1748, over 5945.82 frames. ], batch size: 30, lr: 2.96e-02 +2024-08-06 06:48:47,236 INFO [trainer.py:765] (1/8) Epoch 1, batch 1300, train_loss[loss=5.522, NarTop10Accuracy=0.1902, over 5214.00 frames. ], tot_loss[loss=6.605, NarTop10Accuracy=0.1865, over 6016.95 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,566 INFO [trainer.py:765] (1/8) Epoch 1, batch 1400, train_loss[loss=5.7, NarTop10Accuracy=0.1619, over 6036.00 frames. ], tot_loss[loss=6.19, NarTop10Accuracy=0.1922, over 6048.58 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,506 INFO [trainer.py:765] (1/8) Epoch 1, batch 1500, train_loss[loss=5.577, NarTop10Accuracy=0.2024, over 6291.00 frames. ], tot_loss[loss=5.927, NarTop10Accuracy=0.1985, over 5982.25 frames. ], batch size: 51, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (1/8) Epoch 1, batch 1600, train_loss[loss=5.27, NarTop10Accuracy=0.2494, over 7050.00 frames. ], tot_loss[loss=5.75, NarTop10Accuracy=0.205, over 5967.19 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,596 INFO [trainer.py:765] (1/8) Epoch 1, batch 1700, train_loss[loss=5.405, NarTop10Accuracy=0.2146, over 6639.00 frames. ], tot_loss[loss=5.635, NarTop10Accuracy=0.2099, over 5936.66 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 06:51:11,956 INFO [trainer.py:765] (1/8) Epoch 1, batch 1800, train_loss[loss=5.472, NarTop10Accuracy=0.2135, over 7368.00 frames. ], tot_loss[loss=5.55, NarTop10Accuracy=0.2162, over 6003.93 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,225 INFO [trainer.py:765] (1/8) Epoch 1, batch 1900, train_loss[loss=5.583, NarTop10Accuracy=0.2018, over 6101.00 frames. ], tot_loss[loss=5.497, NarTop10Accuracy=0.2208, over 6047.91 frames. ], batch size: 48, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (1/8) Epoch 1, batch 2000, train_loss[loss=5.398, NarTop10Accuracy=0.2356, over 6059.00 frames. ], tot_loss[loss=5.442, NarTop10Accuracy=0.2279, over 6013.41 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (1/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,995 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 26462MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,585 INFO [trainer.py:765] (1/8) Epoch 1, batch 2100, train_loss[loss=5.6, NarTop10Accuracy=0.1976, over 3937.00 frames. ], tot_loss[loss=5.382, NarTop10Accuracy=0.2379, over 6003.57 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 06:53:05,354 INFO [trainer.py:765] (1/8) Epoch 1, batch 2200, train_loss[loss=5.363, NarTop10Accuracy=0.2375, over 7206.00 frames. ], tot_loss[loss=5.353, NarTop10Accuracy=0.2419, over 6042.58 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 06:53:30,701 INFO [trainer.py:765] (1/8) Epoch 1, batch 2300, train_loss[loss=5.369, NarTop10Accuracy=0.2308, over 5687.00 frames. ], tot_loss[loss=5.338, NarTop10Accuracy=0.245, over 6080.34 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,359 INFO [trainer.py:765] (1/8) Epoch 1, batch 2400, train_loss[loss=5.363, NarTop10Accuracy=0.2375, over 5222.00 frames. ], tot_loss[loss=5.313, NarTop10Accuracy=0.2502, over 5887.24 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (1/8) Epoch 1, batch 2500, train_loss[loss=5.221, NarTop10Accuracy=0.2556, over 4233.00 frames. ], tot_loss[loss=5.263, NarTop10Accuracy=0.2595, over 5533.74 frames. ], batch size: 5, lr: 2.84e-02 +2024-08-06 06:54:39,787 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 06:55:37,937 INFO [trainer.py:765] (1/8) Epoch 2, batch 100, train_loss[loss=5.307, NarTop10Accuracy=0.2642, over 7298.00 frames. ], tot_loss[loss=5.173, NarTop10Accuracy=0.2809, over 2356.32 frames. 
], batch size: 31, lr: 2.77e-02 +2024-08-06 06:56:16,405 INFO [trainer.py:765] (1/8) Epoch 2, batch 200, train_loss[loss=4.921, NarTop10Accuracy=0.3477, over 6958.00 frames. ], tot_loss[loss=5.16, NarTop10Accuracy=0.2826, over 3861.05 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,973 INFO [trainer.py:765] (1/8) Epoch 2, batch 300, train_loss[loss=5.264, NarTop10Accuracy=0.2665, over 7129.00 frames. ], tot_loss[loss=5.151, NarTop10Accuracy=0.285, over 4667.31 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,939 INFO [trainer.py:765] (1/8) Epoch 2, batch 400, train_loss[loss=5.347, NarTop10Accuracy=0.2348, over 5249.00 frames. ], tot_loss[loss=5.14, NarTop10Accuracy=0.2873, over 5114.37 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (1/8) Epoch 2, batch 500, train_loss[loss=4.807, NarTop10Accuracy=0.3438, over 6044.00 frames. ], tot_loss[loss=5.1, NarTop10Accuracy=0.2946, over 5403.90 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,426 INFO [trainer.py:765] (1/8) Epoch 2, batch 600, train_loss[loss=4.977, NarTop10Accuracy=0.3255, over 5672.00 frames. ], tot_loss[loss=5.089, NarTop10Accuracy=0.2963, over 5672.19 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (1/8) Epoch 2, batch 700, train_loss[loss=4.863, NarTop10Accuracy=0.3315, over 5063.00 frames. ], tot_loss[loss=5.08, NarTop10Accuracy=0.2984, over 5758.54 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (1/8) Epoch 2, batch 800, train_loss[loss=4.998, NarTop10Accuracy=0.3177, over 5039.00 frames. ], tot_loss[loss=5.081, NarTop10Accuracy=0.2974, over 5815.87 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,184 INFO [trainer.py:765] (1/8) Epoch 2, batch 900, train_loss[loss=5.461, NarTop10Accuracy=0.209, over 6287.00 frames. ], tot_loss[loss=5.043, NarTop10Accuracy=0.305, over 5828.99 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,143 INFO [trainer.py:765] (1/8) Epoch 2, batch 1000, train_loss[loss=4.857, NarTop10Accuracy=0.3409, over 6749.00 frames. ], tot_loss[loss=5.014, NarTop10Accuracy=0.311, over 5920.84 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 07:01:05,573 INFO [trainer.py:765] (1/8) Epoch 2, batch 1100, train_loss[loss=4.928, NarTop10Accuracy=0.3316, over 7044.00 frames. ], tot_loss[loss=5, NarTop10Accuracy=0.3138, over 5938.50 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (1/8) Epoch 2, batch 1200, train_loss[loss=4.887, NarTop10Accuracy=0.3364, over 7103.00 frames. ], tot_loss[loss=5.002, NarTop10Accuracy=0.3138, over 5954.53 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,645 INFO [trainer.py:765] (1/8) Epoch 2, batch 1300, train_loss[loss=5.306, NarTop10Accuracy=0.2512, over 5060.00 frames. ], tot_loss[loss=4.963, NarTop10Accuracy=0.3214, over 6029.01 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,252 INFO [trainer.py:765] (1/8) Epoch 2, batch 1400, train_loss[loss=4.619, NarTop10Accuracy=0.3899, over 6023.00 frames. ], tot_loss[loss=4.942, NarTop10Accuracy=0.3254, over 6043.89 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,268 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (1/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29668MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (1/8) Epoch 2, batch 1500, train_loss[loss=5.084, NarTop10Accuracy=0.3025, over 6170.00 frames. ], tot_loss[loss=4.937, NarTop10Accuracy=0.3259, over 5965.99 frames. ], batch size: 49, lr: 2.60e-02 +2024-08-06 07:03:53,554 INFO [trainer.py:765] (1/8) Epoch 2, batch 1600, train_loss[loss=4.74, NarTop10Accuracy=0.3589, over 7196.00 frames. ], tot_loss[loss=4.918, NarTop10Accuracy=0.3297, over 5950.39 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (1/8) Epoch 2, batch 1700, train_loss[loss=4.651, NarTop10Accuracy=0.3798, over 6384.00 frames. ], tot_loss[loss=4.911, NarTop10Accuracy=0.3315, over 5935.16 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (1/8) Epoch 2, batch 1800, train_loss[loss=5.009, NarTop10Accuracy=0.3254, over 7093.00 frames. ], tot_loss[loss=4.896, NarTop10Accuracy=0.3341, over 5995.91 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,586 INFO [trainer.py:765] (1/8) Epoch 2, batch 1900, train_loss[loss=4.9, NarTop10Accuracy=0.3313, over 5847.00 frames. ], tot_loss[loss=4.875, NarTop10Accuracy=0.3385, over 6033.26 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (1/8) Epoch 2, batch 2000, train_loss[loss=4.785, NarTop10Accuracy=0.3602, over 6537.00 frames. ], tot_loss[loss=4.853, NarTop10Accuracy=0.3429, over 6010.76 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (1/8) Epoch 2, batch 2100, train_loss[loss=4.389, NarTop10Accuracy=0.4335, over 4784.00 frames. ], tot_loss[loss=4.857, NarTop10Accuracy=0.342, over 5997.84 frames. ], batch size: 5, lr: 2.52e-02 +2024-08-06 07:06:30,372 INFO [trainer.py:765] (1/8) Epoch 2, batch 2200, train_loss[loss=4.8, NarTop10Accuracy=0.3612, over 7333.00 frames. ], tot_loss[loss=4.816, NarTop10Accuracy=0.3507, over 6034.34 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (1/8) Epoch 2, batch 2300, train_loss[loss=4.501, NarTop10Accuracy=0.409, over 5741.00 frames. ], tot_loss[loss=4.804, NarTop10Accuracy=0.3533, over 6060.88 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,577 INFO [trainer.py:765] (1/8) Epoch 2, batch 2400, train_loss[loss=4.445, NarTop10Accuracy=0.4323, over 5054.00 frames. ], tot_loss[loss=4.768, NarTop10Accuracy=0.3605, over 5874.61 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,112 INFO [trainer.py:765] (1/8) Epoch 2, batch 2500, train_loss[loss=4.437, NarTop10Accuracy=0.4204, over 4970.00 frames. ], tot_loss[loss=4.741, NarTop10Accuracy=0.3658, over 5544.05 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:08,316 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 07:09:08,538 INFO [trainer.py:765] (1/8) Epoch 3, batch 100, train_loss[loss=4.768, NarTop10Accuracy=0.3476, over 7389.00 frames. ], tot_loss[loss=4.663, NarTop10Accuracy=0.3831, over 2356.81 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,500 INFO [trainer.py:765] (1/8) Epoch 3, batch 200, train_loss[loss=4.356, NarTop10Accuracy=0.4441, over 6962.00 frames. ], tot_loss[loss=4.622, NarTop10Accuracy=0.3913, over 3865.61 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (1/8) Epoch 3, batch 300, train_loss[loss=4.33, NarTop10Accuracy=0.4402, over 7218.00 frames. ], tot_loss[loss=4.61, NarTop10Accuracy=0.3931, over 4670.78 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (1/8) Epoch 3, batch 400, train_loss[loss=4.424, NarTop10Accuracy=0.4357, over 5298.00 frames. ], tot_loss[loss=4.584, NarTop10Accuracy=0.3979, over 5125.51 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (1/8) Epoch 3, batch 500, train_loss[loss=4.853, NarTop10Accuracy=0.3459, over 6024.00 frames. ], tot_loss[loss=4.586, NarTop10Accuracy=0.398, over 5405.77 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (1/8) Epoch 3, batch 600, train_loss[loss=4.493, NarTop10Accuracy=0.4242, over 5752.00 frames. ], tot_loss[loss=4.568, NarTop10Accuracy=0.4012, over 5674.47 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (1/8) Epoch 3, batch 700, train_loss[loss=4.423, NarTop10Accuracy=0.4249, over 5080.00 frames. ], tot_loss[loss=4.548, NarTop10Accuracy=0.4045, over 5737.26 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (1/8) Epoch 3, batch 800, train_loss[loss=4.211, NarTop10Accuracy=0.4682, over 5190.00 frames. ], tot_loss[loss=4.539, NarTop10Accuracy=0.4063, over 5802.64 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (1/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29668MB +2024-08-06 07:13:23,430 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (1/8) Epoch 3, batch 900, train_loss[loss=4.478, NarTop10Accuracy=0.421, over 6378.00 frames. ], tot_loss[loss=4.513, NarTop10Accuracy=0.4107, over 5816.20 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (1/8) Epoch 3, batch 1000, train_loss[loss=4.208, NarTop10Accuracy=0.4673, over 6324.00 frames. ], tot_loss[loss=4.496, NarTop10Accuracy=0.4137, over 5915.77 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (1/8) Epoch 3, batch 1100, train_loss[loss=4.494, NarTop10Accuracy=0.4153, over 6930.00 frames. ], tot_loss[loss=4.486, NarTop10Accuracy=0.4155, over 5931.41 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,867 INFO [trainer.py:765] (1/8) Epoch 3, batch 1200, train_loss[loss=4.326, NarTop10Accuracy=0.4405, over 7107.00 frames. ], tot_loss[loss=4.476, NarTop10Accuracy=0.4174, over 5938.41 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (1/8) Epoch 3, batch 1300, train_loss[loss=4.668, NarTop10Accuracy=0.3851, over 4970.00 frames. ], tot_loss[loss=4.462, NarTop10Accuracy=0.4202, over 6012.13 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (1/8) Epoch 3, batch 1400, train_loss[loss=4.089, NarTop10Accuracy=0.486, over 6067.00 frames. ], tot_loss[loss=4.459, NarTop10Accuracy=0.4205, over 6036.41 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,663 INFO [trainer.py:765] (1/8) Epoch 3, batch 1500, train_loss[loss=4.647, NarTop10Accuracy=0.3802, over 5656.00 frames. ], tot_loss[loss=4.447, NarTop10Accuracy=0.4225, over 5976.02 frames. ], batch size: 50, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (1/8) Epoch 3, batch 1600, train_loss[loss=4.173, NarTop10Accuracy=0.482, over 7150.00 frames. ], tot_loss[loss=4.43, NarTop10Accuracy=0.4261, over 5956.80 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,503 INFO [trainer.py:765] (1/8) Epoch 3, batch 1700, train_loss[loss=4.336, NarTop10Accuracy=0.4374, over 6395.00 frames. ], tot_loss[loss=4.402, NarTop10Accuracy=0.4314, over 5937.46 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,161 INFO [trainer.py:765] (1/8) Epoch 3, batch 1800, train_loss[loss=4.269, NarTop10Accuracy=0.458, over 7195.00 frames. ], tot_loss[loss=4.384, NarTop10Accuracy=0.4353, over 6006.76 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,958 INFO [trainer.py:765] (1/8) Epoch 3, batch 1900, train_loss[loss=4.692, NarTop10Accuracy=0.3803, over 5947.00 frames. ], tot_loss[loss=4.369, NarTop10Accuracy=0.4384, over 6036.35 frames. ], batch size: 48, lr: 2.16e-02 +2024-08-06 07:19:27,622 INFO [trainer.py:765] (1/8) Epoch 3, batch 2000, train_loss[loss=4.584, NarTop10Accuracy=0.3964, over 5955.00 frames. ], tot_loss[loss=4.348, NarTop10Accuracy=0.4422, over 5998.48 frames. ], batch size: 48, lr: 2.15e-02 +2024-08-06 07:19:53,071 INFO [trainer.py:765] (1/8) Epoch 3, batch 2100, train_loss[loss=4.166, NarTop10Accuracy=0.4669, over 4008.00 frames. ], tot_loss[loss=4.313, NarTop10Accuracy=0.449, over 5981.14 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,554 INFO [trainer.py:765] (1/8) Epoch 3, batch 2200, train_loss[loss=4.49, NarTop10Accuracy=0.4119, over 7193.00 frames. ], tot_loss[loss=4.302, NarTop10Accuracy=0.4514, over 6033.05 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (1/8) Epoch 3, batch 2300, train_loss[loss=4.179, NarTop10Accuracy=0.4805, over 5865.00 frames. ], tot_loss[loss=4.315, NarTop10Accuracy=0.4489, over 6059.83 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,677 INFO [trainer.py:765] (1/8) Epoch 3, batch 2400, train_loss[loss=4.301, NarTop10Accuracy=0.4559, over 5668.00 frames. ], tot_loss[loss=4.298, NarTop10Accuracy=0.4519, over 5882.51 frames. ], batch size: 50, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (1/8) Epoch 3, batch 2500, train_loss[loss=3.955, NarTop10Accuracy=0.5074, over 4933.00 frames. ], tot_loss[loss=4.253, NarTop10Accuracy=0.4603, over 5534.39 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,202 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (1/8) Epoch 4, batch 100, train_loss[loss=4.148, NarTop10Accuracy=0.4642, over 7455.00 frames. ], tot_loss[loss=4.181, NarTop10Accuracy=0.4776, over 2377.19 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,304 INFO [trainer.py:765] (1/8) Epoch 4, batch 200, train_loss[loss=4.065, NarTop10Accuracy=0.4959, over 6905.00 frames. ], tot_loss[loss=4.177, NarTop10Accuracy=0.4784, over 3867.89 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29668MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,362 INFO [trainer.py:765] (1/8) Epoch 4, batch 300, train_loss[loss=4.084, NarTop10Accuracy=0.4976, over 7166.00 frames. ], tot_loss[loss=4.166, NarTop10Accuracy=0.4805, over 4677.58 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,597 INFO [trainer.py:765] (1/8) Epoch 4, batch 400, train_loss[loss=3.584, NarTop10Accuracy=0.5779, over 5169.00 frames. ], tot_loss[loss=4.172, NarTop10Accuracy=0.4791, over 5128.39 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (1/8) Epoch 4, batch 500, train_loss[loss=4.333, NarTop10Accuracy=0.4542, over 6093.00 frames. ], tot_loss[loss=4.16, NarTop10Accuracy=0.481, over 5404.85 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,976 INFO [trainer.py:765] (1/8) Epoch 4, batch 600, train_loss[loss=4.132, NarTop10Accuracy=0.4953, over 5805.00 frames. ], tot_loss[loss=4.149, NarTop10Accuracy=0.4828, over 5674.00 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (1/8) Epoch 4, batch 700, train_loss[loss=4.212, NarTop10Accuracy=0.47, over 5161.00 frames. ], tot_loss[loss=4.152, NarTop10Accuracy=0.4821, over 5745.21 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,433 INFO [trainer.py:765] (1/8) Epoch 4, batch 800, train_loss[loss=4.235, NarTop10Accuracy=0.4745, over 5097.00 frames. ], tot_loss[loss=4.146, NarTop10Accuracy=0.4836, over 5788.50 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,042 INFO [trainer.py:765] (1/8) Epoch 4, batch 900, train_loss[loss=4.148, NarTop10Accuracy=0.4868, over 6306.00 frames. ], tot_loss[loss=4.11, NarTop10Accuracy=0.4907, over 5815.76 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (1/8) Epoch 4, batch 1000, train_loss[loss=4.064, NarTop10Accuracy=0.4922, over 6723.00 frames. ], tot_loss[loss=4.107, NarTop10Accuracy=0.4913, over 5918.26 frames. ], batch size: 14, lr: 1.89e-02 +2024-08-06 07:28:54,071 INFO [trainer.py:765] (1/8) Epoch 4, batch 1100, train_loss[loss=4.12, NarTop10Accuracy=0.5018, over 6834.00 frames. ], tot_loss[loss=4.112, NarTop10Accuracy=0.4909, over 5959.60 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,599 INFO [trainer.py:765] (1/8) Epoch 4, batch 1200, train_loss[loss=4.137, NarTop10Accuracy=0.4836, over 7527.00 frames. ], tot_loss[loss=4.105, NarTop10Accuracy=0.4913, over 5954.86 frames. ], batch size: 31, lr: 1.87e-02 +2024-08-06 07:30:04,991 INFO [trainer.py:765] (1/8) Epoch 4, batch 1300, train_loss[loss=4.116, NarTop10Accuracy=0.4939, over 4986.00 frames. ], tot_loss[loss=4.076, NarTop10Accuracy=0.497, over 6016.67 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,380 INFO [trainer.py:765] (1/8) Epoch 4, batch 1400, train_loss[loss=4.036, NarTop10Accuracy=0.5135, over 6224.00 frames. ], tot_loss[loss=4.081, NarTop10Accuracy=0.4968, over 6032.02 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,832 INFO [trainer.py:765] (1/8) Epoch 4, batch 1500, train_loss[loss=4.045, NarTop10Accuracy=0.5068, over 6153.00 frames. ], tot_loss[loss=4.074, NarTop10Accuracy=0.498, over 5968.61 frames. 
], batch size: 48, lr: 1.85e-02 +2024-08-06 07:31:39,961 INFO [trainer.py:765] (1/8) Epoch 4, batch 1600, train_loss[loss=4.196, NarTop10Accuracy=0.4814, over 7182.00 frames. ], tot_loss[loss=4.067, NarTop10Accuracy=0.4995, over 5955.53 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (1/8) Epoch 4, batch 1700, train_loss[loss=4.483, NarTop10Accuracy=0.414, over 6288.00 frames. ], tot_loss[loss=4.042, NarTop10Accuracy=0.5042, over 5927.69 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (1/8) Epoch 4, batch 1800, train_loss[loss=4.269, NarTop10Accuracy=0.4583, over 7169.00 frames. ], tot_loss[loss=4.045, NarTop10Accuracy=0.5037, over 6002.74 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,193 INFO [trainer.py:765] (1/8) Epoch 4, batch 1900, train_loss[loss=4.119, NarTop10Accuracy=0.4905, over 6124.00 frames. ], tot_loss[loss=4.068, NarTop10Accuracy=0.4992, over 6042.02 frames. ], batch size: 49, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (1/8) Epoch 4, batch 2000, train_loss[loss=4.484, NarTop10Accuracy=0.424, over 5916.00 frames. ], tot_loss[loss=4.049, NarTop10Accuracy=0.5038, over 6036.04 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (1/8) Epoch 4, batch 2100, train_loss[loss=3.77, NarTop10Accuracy=0.5771, over 4031.00 frames. ], tot_loss[loss=4.034, NarTop10Accuracy=0.5067, over 6000.97 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (1/8) Epoch 4, batch 2200, train_loss[loss=4.135, NarTop10Accuracy=0.4873, over 7353.00 frames. ], tot_loss[loss=4.038, NarTop10Accuracy=0.506, over 6058.37 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 07:34:31,431 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (1/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29668MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (1/8) Epoch 4, batch 2300, train_loss[loss=3.938, NarTop10Accuracy=0.5234, over 5739.00 frames. ], tot_loss[loss=4.037, NarTop10Accuracy=0.5065, over 6080.01 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,166 INFO [trainer.py:765] (1/8) Epoch 4, batch 2400, train_loss[loss=3.689, NarTop10Accuracy=0.574, over 5091.00 frames. ], tot_loss[loss=4.025, NarTop10Accuracy=0.5089, over 5874.56 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,622 INFO [trainer.py:765] (1/8) Epoch 4, batch 2500, train_loss[loss=4.23, NarTop10Accuracy=0.4682, over 5041.00 frames. ], tot_loss[loss=4.006, NarTop10Accuracy=0.5123, over 5528.81 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,614 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (1/8) Epoch 5, batch 100, train_loss[loss=3.892, NarTop10Accuracy=0.5303, over 7378.00 frames. ], tot_loss[loss=3.966, NarTop10Accuracy=0.5207, over 2364.36 frames. ], batch size: 30, lr: 1.66e-02 +2024-08-06 07:37:39,815 INFO [trainer.py:765] (1/8) Epoch 5, batch 200, train_loss[loss=4.005, NarTop10Accuracy=0.5137, over 6947.00 frames. ], tot_loss[loss=3.937, NarTop10Accuracy=0.5267, over 3860.82 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (1/8) Epoch 5, batch 300, train_loss[loss=4.162, NarTop10Accuracy=0.4848, over 7034.00 frames. ], tot_loss[loss=3.922, NarTop10Accuracy=0.5301, over 4670.22 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (1/8) Epoch 5, batch 400, train_loss[loss=3.982, NarTop10Accuracy=0.5282, over 5074.00 frames. ], tot_loss[loss=3.92, NarTop10Accuracy=0.5301, over 5121.30 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (1/8) Epoch 5, batch 500, train_loss[loss=3.852, NarTop10Accuracy=0.54, over 6190.00 frames. ], tot_loss[loss=3.923, NarTop10Accuracy=0.5293, over 5407.91 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (1/8) Epoch 5, batch 600, train_loss[loss=3.896, NarTop10Accuracy=0.5335, over 5894.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5325, over 5663.70 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (1/8) Epoch 5, batch 700, train_loss[loss=3.624, NarTop10Accuracy=0.5871, over 4988.00 frames. ], tot_loss[loss=3.914, NarTop10Accuracy=0.5319, over 5733.77 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,367 INFO [trainer.py:765] (1/8) Epoch 5, batch 800, train_loss[loss=3.873, NarTop10Accuracy=0.5286, over 4965.00 frames. ], tot_loss[loss=3.919, NarTop10Accuracy=0.5309, over 5781.46 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (1/8) Epoch 5, batch 900, train_loss[loss=4.266, NarTop10Accuracy=0.4645, over 6251.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5333, over 5816.88 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 07:42:13,845 INFO [trainer.py:765] (1/8) Epoch 5, batch 1000, train_loss[loss=4.004, NarTop10Accuracy=0.5216, over 6226.00 frames. ], tot_loss[loss=3.891, NarTop10Accuracy=0.5359, over 5911.96 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (1/8) Epoch 5, batch 1100, train_loss[loss=3.866, NarTop10Accuracy=0.5318, over 7034.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5325, over 5951.20 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (1/8) Epoch 5, batch 1200, train_loss[loss=4.069, NarTop10Accuracy=0.4905, over 7235.00 frames. ], tot_loss[loss=3.91, NarTop10Accuracy=0.5324, over 5950.62 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,557 INFO [trainer.py:765] (1/8) Epoch 5, batch 1300, train_loss[loss=3.835, NarTop10Accuracy=0.5477, over 5049.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5334, over 6025.44 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (1/8) Epoch 5, batch 1400, train_loss[loss=3.899, NarTop10Accuracy=0.5402, over 6070.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5333, over 6057.45 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (1/8) Epoch 5, batch 1500, train_loss[loss=3.837, NarTop10Accuracy=0.5511, over 6763.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5344, over 6002.51 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (1/8) Epoch 5, batch 1600, train_loss[loss=4.231, NarTop10Accuracy=0.4763, over 7097.00 frames. ], tot_loss[loss=3.913, NarTop10Accuracy=0.5319, over 5974.89 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,058 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (1/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 29668MB +2024-08-06 07:46:02,123 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (1/8) Epoch 5, batch 1700, train_loss[loss=3.777, NarTop10Accuracy=0.5604, over 6333.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5346, over 5947.41 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,966 INFO [trainer.py:765] (1/8) Epoch 5, batch 1800, train_loss[loss=3.868, NarTop10Accuracy=0.5416, over 7114.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5352, over 6000.24 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (1/8) Epoch 5, batch 1900, train_loss[loss=3.801, NarTop10Accuracy=0.5581, over 5799.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5354, over 6032.44 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:27,146 INFO [trainer.py:765] (1/8) Epoch 5, batch 2000, train_loss[loss=3.816, NarTop10Accuracy=0.5637, over 6037.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5358, over 5997.48 frames. ], batch size: 48, lr: 1.55e-02 +2024-08-06 07:47:52,618 INFO [trainer.py:765] (1/8) Epoch 5, batch 2100, train_loss[loss=3.861, NarTop10Accuracy=0.5442, over 3998.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5337, over 5990.23 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,992 INFO [trainer.py:765] (1/8) Epoch 5, batch 2200, train_loss[loss=3.94, NarTop10Accuracy=0.522, over 7377.00 frames. ], tot_loss[loss=3.893, NarTop10Accuracy=0.5362, over 6028.27 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (1/8) Epoch 5, batch 2300, train_loss[loss=4.03, NarTop10Accuracy=0.509, over 5783.00 frames. ], tot_loss[loss=3.901, NarTop10Accuracy=0.5345, over 6058.71 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (1/8) Epoch 5, batch 2400, train_loss[loss=4.023, NarTop10Accuracy=0.5078, over 6738.00 frames. ], tot_loss[loss=3.894, NarTop10Accuracy=0.5361, over 5884.97 frames. ], batch size: 49, lr: 1.53e-02 +2024-08-06 07:49:31,644 INFO [trainer.py:765] (1/8) Epoch 5, batch 2500, train_loss[loss=3.757, NarTop10Accuracy=0.5655, over 5007.00 frames. ], tot_loss[loss=3.857, NarTop10Accuracy=0.5431, over 5523.92 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:52,738 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (1/8) Epoch 6, batch 100, train_loss[loss=3.73, NarTop10Accuracy=0.5753, over 7156.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.555, over 2372.57 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,788 INFO [trainer.py:765] (1/8) Epoch 6, batch 200, train_loss[loss=3.722, NarTop10Accuracy=0.5804, over 6900.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5564, over 3873.32 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (1/8) Epoch 6, batch 300, train_loss[loss=3.538, NarTop10Accuracy=0.6178, over 7047.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5593, over 4677.92 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (1/8) Epoch 6, batch 400, train_loss[loss=3.773, NarTop10Accuracy=0.5595, over 5095.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5598, over 5118.65 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (1/8) Epoch 6, batch 500, train_loss[loss=3.929, NarTop10Accuracy=0.5299, over 6083.00 frames. ], tot_loss[loss=3.767, NarTop10Accuracy=0.5624, over 5406.40 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (1/8) Epoch 6, batch 600, train_loss[loss=3.849, NarTop10Accuracy=0.5458, over 5849.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5592, over 5669.10 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,438 INFO [trainer.py:765] (1/8) Epoch 6, batch 700, train_loss[loss=4.058, NarTop10Accuracy=0.4975, over 4930.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5572, over 5728.99 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (1/8) Epoch 6, batch 800, train_loss[loss=3.631, NarTop10Accuracy=0.5903, over 5060.00 frames. ], tot_loss[loss=3.792, NarTop10Accuracy=0.5572, over 5797.67 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (1/8) Epoch 6, batch 900, train_loss[loss=3.43, NarTop10Accuracy=0.6379, over 6292.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5584, over 5820.39 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (1/8) Epoch 6, batch 1000, train_loss[loss=3.608, NarTop10Accuracy=0.6, over 6371.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5561, over 5929.63 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (1/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (1/8) Epoch 6, batch 1100, train_loss[loss=3.741, NarTop10Accuracy=0.578, over 6746.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5558, over 5957.60 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (1/8) Epoch 6, batch 1200, train_loss[loss=4.056, NarTop10Accuracy=0.504, over 7329.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5566, over 5961.50 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (1/8) Epoch 6, batch 1300, train_loss[loss=3.523, NarTop10Accuracy=0.6008, over 5095.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5566, over 6025.39 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,735 INFO [trainer.py:765] (1/8) Epoch 6, batch 1400, train_loss[loss=3.98, NarTop10Accuracy=0.5149, over 6103.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5556, over 6043.35 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,999 INFO [trainer.py:765] (1/8) Epoch 6, batch 1500, train_loss[loss=4.076, NarTop10Accuracy=0.4978, over 5884.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5545, over 5971.24 frames. 
], batch size: 48, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (1/8) Epoch 6, batch 1600, train_loss[loss=3.71, NarTop10Accuracy=0.5652, over 7043.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5555, over 5953.64 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,618 INFO [trainer.py:765] (1/8) Epoch 6, batch 1700, train_loss[loss=3.708, NarTop10Accuracy=0.5658, over 6367.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5577, over 5943.57 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 08:00:22,188 INFO [trainer.py:765] (1/8) Epoch 6, batch 1800, train_loss[loss=3.932, NarTop10Accuracy=0.5292, over 6694.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5579, over 5993.59 frames. ], batch size: 21, lr: 1.35e-02 +2024-08-06 08:00:48,795 INFO [trainer.py:765] (1/8) Epoch 6, batch 1900, train_loss[loss=4.129, NarTop10Accuracy=0.4894, over 6106.00 frames. ], tot_loss[loss=3.817, NarTop10Accuracy=0.552, over 6047.51 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (1/8) Epoch 6, batch 2000, train_loss[loss=3.971, NarTop10Accuracy=0.5225, over 6129.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5558, over 6027.10 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:43,133 INFO [trainer.py:765] (1/8) Epoch 6, batch 2100, train_loss[loss=3.274, NarTop10Accuracy=0.6612, over 4838.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5566, over 6009.84 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (1/8) Epoch 6, batch 2200, train_loss[loss=3.683, NarTop10Accuracy=0.5801, over 7373.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.5559, over 6047.91 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (1/8) Epoch 6, batch 2300, train_loss[loss=3.509, NarTop10Accuracy=0.6173, over 5801.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.555, over 6065.73 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (1/8) Epoch 6, batch 2400, train_loss[loss=3.541, NarTop10Accuracy=0.597, over 5151.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.5563, over 5875.63 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (1/8) Epoch 6, batch 2500, train_loss[loss=3.792, NarTop10Accuracy=0.5611, over 5073.00 frames. ], tot_loss[loss=3.77, NarTop10Accuracy=0.561, over 5522.47 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:42,842 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:04:42,817 INFO [trainer.py:765] (1/8) Epoch 7, batch 100, train_loss[loss=4.007, NarTop10Accuracy=0.521, over 7487.00 frames. ], tot_loss[loss=3.694, NarTop10Accuracy=0.5785, over 2378.93 frames. ], batch size: 31, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (1/8) Epoch 7, batch 200, train_loss[loss=3.816, NarTop10Accuracy=0.5591, over 6833.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5737, over 3882.01 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (1/8) Epoch 7, batch 300, train_loss[loss=3.556, NarTop10Accuracy=0.6168, over 7149.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5722, over 4680.97 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (1/8) Epoch 7, batch 400, train_loss[loss=3.789, NarTop10Accuracy=0.5515, over 5221.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5733, over 5134.30 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,316 INFO [trainer.py:765] (1/8) Epoch 7, batch 500, train_loss[loss=3.843, NarTop10Accuracy=0.5546, over 6200.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5729, over 5407.12 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (1/8) Epoch 7, batch 600, train_loss[loss=3.527, NarTop10Accuracy=0.6106, over 5751.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5738, over 5669.33 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (1/8) Epoch 7, batch 700, train_loss[loss=3.518, NarTop10Accuracy=0.6119, over 4276.00 frames. ], tot_loss[loss=3.714, NarTop10Accuracy=0.5738, over 5727.57 frames. ], batch size: 5, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (1/8) Epoch 7, batch 800, train_loss[loss=3.558, NarTop10Accuracy=0.6016, over 5075.00 frames. ], tot_loss[loss=3.694, NarTop10Accuracy=0.5778, over 5773.85 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (1/8) Epoch 7, batch 900, train_loss[loss=3.764, NarTop10Accuracy=0.5656, over 6203.00 frames. ], tot_loss[loss=3.701, NarTop10Accuracy=0.5762, over 5801.86 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,192 INFO [trainer.py:765] (1/8) Epoch 7, batch 1000, train_loss[loss=3.821, NarTop10Accuracy=0.5413, over 6206.00 frames. ], tot_loss[loss=3.703, NarTop10Accuracy=0.5754, over 5893.67 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (1/8) Epoch 7, batch 1100, train_loss[loss=3.741, NarTop10Accuracy=0.567, over 6804.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5726, over 5941.20 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (1/8) Epoch 7, batch 1200, train_loss[loss=3.975, NarTop10Accuracy=0.5139, over 7483.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5727, over 5952.66 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (1/8) Epoch 7, batch 1300, train_loss[loss=3.629, NarTop10Accuracy=0.5992, over 5073.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5744, over 6015.32 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (1/8) Epoch 7, batch 1400, train_loss[loss=3.75, NarTop10Accuracy=0.5638, over 6174.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5731, over 6016.51 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (1/8) Epoch 7, batch 1500, train_loss[loss=3.802, NarTop10Accuracy=0.5595, over 5407.00 frames. ], tot_loss[loss=3.701, NarTop10Accuracy=0.5748, over 5949.80 frames. ], batch size: 48, lr: 1.19e-02 +2024-08-06 08:13:13,238 INFO [trainer.py:765] (1/8) Epoch 7, batch 1600, train_loss[loss=3.53, NarTop10Accuracy=0.6128, over 7004.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.5734, over 5940.20 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (1/8) Epoch 7, batch 1700, train_loss[loss=3.624, NarTop10Accuracy=0.5796, over 6647.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5719, over 5944.71 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 08:14:06,584 INFO [trainer.py:765] (1/8) Epoch 7, batch 1800, train_loss[loss=3.777, NarTop10Accuracy=0.5653, over 7097.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5703, over 6006.58 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (1/8) Epoch 7, batch 1900, train_loss[loss=4.137, NarTop10Accuracy=0.4908, over 6266.00 frames. ], tot_loss[loss=3.727, NarTop10Accuracy=0.57, over 6045.73 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:14:58,995 INFO [trainer.py:765] (1/8) Epoch 7, batch 2000, train_loss[loss=3.754, NarTop10Accuracy=0.57, over 6201.00 frames. ], tot_loss[loss=3.73, NarTop10Accuracy=0.5696, over 6023.54 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (1/8) Epoch 7, batch 2100, train_loss[loss=3.766, NarTop10Accuracy=0.5711, over 4613.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5707, over 5996.30 frames. ], batch size: 5, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (1/8) Epoch 7, batch 2200, train_loss[loss=4.132, NarTop10Accuracy=0.4787, over 7303.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5682, over 6024.28 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (1/8) Epoch 7, batch 2300, train_loss[loss=3.977, NarTop10Accuracy=0.5185, over 5853.00 frames. ], tot_loss[loss=3.738, NarTop10Accuracy=0.5673, over 6069.22 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (1/8) Epoch 7, batch 2400, train_loss[loss=3.892, NarTop10Accuracy=0.5341, over 6117.00 frames. ], tot_loss[loss=3.741, NarTop10Accuracy=0.5675, over 5901.63 frames. ], batch size: 49, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (1/8) Epoch 7, batch 2500, train_loss[loss=3.732, NarTop10Accuracy=0.5647, over 5108.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.571, over 5546.44 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,843 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (1/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 08:17:17,902 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,358 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:18:36,193 INFO [trainer.py:765] (1/8) Epoch 8, batch 100, train_loss[loss=3.732, NarTop10Accuracy=0.5625, over 7398.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5843, over 2376.18 frames. ], batch size: 30, lr: 1.09e-02 +2024-08-06 08:19:15,019 INFO [trainer.py:765] (1/8) Epoch 8, batch 200, train_loss[loss=3.569, NarTop10Accuracy=0.6045, over 6806.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5844, over 3873.47 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (1/8) Epoch 8, batch 300, train_loss[loss=3.574, NarTop10Accuracy=0.6002, over 7223.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.584, over 4673.96 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,268 INFO [trainer.py:765] (1/8) Epoch 8, batch 400, train_loss[loss=3.573, NarTop10Accuracy=0.5974, over 5127.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5841, over 5115.23 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,421 INFO [trainer.py:765] (1/8) Epoch 8, batch 500, train_loss[loss=3.458, NarTop10Accuracy=0.6359, over 6514.00 frames. ], tot_loss[loss=3.655, NarTop10Accuracy=0.586, over 5396.80 frames. ], batch size: 12, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (1/8) Epoch 8, batch 600, train_loss[loss=3.902, NarTop10Accuracy=0.5278, over 5619.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5842, over 5678.32 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (1/8) Epoch 8, batch 700, train_loss[loss=3.803, NarTop10Accuracy=0.5494, over 5129.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5838, over 5753.78 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,341 INFO [trainer.py:765] (1/8) Epoch 8, batch 800, train_loss[loss=3.178, NarTop10Accuracy=0.6707, over 5100.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.582, over 5806.52 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (1/8) Epoch 8, batch 900, train_loss[loss=3.357, NarTop10Accuracy=0.6456, over 6656.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5836, over 5822.93 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 08:23:42,943 INFO [trainer.py:765] (1/8) Epoch 8, batch 1000, train_loss[loss=3.552, NarTop10Accuracy=0.6147, over 6367.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5828, over 5924.84 frames. ], batch size: 13, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (1/8) Epoch 8, batch 1100, train_loss[loss=3.897, NarTop10Accuracy=0.5405, over 6896.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5806, over 5955.57 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,339 INFO [trainer.py:765] (1/8) Epoch 8, batch 1200, train_loss[loss=3.563, NarTop10Accuracy=0.6049, over 7311.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5788, over 5940.48 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 08:25:26,604 INFO [trainer.py:765] (1/8) Epoch 8, batch 1300, train_loss[loss=3.694, NarTop10Accuracy=0.5863, over 5179.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5831, over 6016.84 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,605 INFO [trainer.py:765] (1/8) Epoch 8, batch 1400, train_loss[loss=3.626, NarTop10Accuracy=0.5866, over 6072.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5802, over 6017.50 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (1/8) Epoch 8, batch 1500, train_loss[loss=3.763, NarTop10Accuracy=0.5724, over 6488.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5827, over 5979.72 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (1/8) Epoch 8, batch 1600, train_loss[loss=3.858, NarTop10Accuracy=0.5433, over 7214.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5822, over 5958.10 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (1/8) Epoch 8, batch 1700, train_loss[loss=3.554, NarTop10Accuracy=0.6035, over 6232.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5824, over 5936.60 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,462 INFO [trainer.py:765] (1/8) Epoch 8, batch 1800, train_loss[loss=3.613, NarTop10Accuracy=0.5994, over 7107.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5836, over 6008.18 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,180 INFO [trainer.py:765] (1/8) Epoch 8, batch 1900, train_loss[loss=4.129, NarTop10Accuracy=0.4908, over 6105.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5816, over 6045.34 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:28:25,164 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (1/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 08:28:35,796 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,983 INFO [trainer.py:765] (1/8) Epoch 8, batch 2000, train_loss[loss=3.751, NarTop10Accuracy=0.5651, over 5654.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5828, over 6014.65 frames. ], batch size: 48, lr: 1.04e-02 +2024-08-06 08:29:18,485 INFO [trainer.py:765] (1/8) Epoch 8, batch 2100, train_loss[loss=3.523, NarTop10Accuracy=0.6154, over 3942.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5832, over 5998.49 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 08:29:43,790 INFO [trainer.py:765] (1/8) Epoch 8, batch 2200, train_loss[loss=4.028, NarTop10Accuracy=0.5113, over 7234.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5806, over 6030.68 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (1/8) Epoch 8, batch 2300, train_loss[loss=3.534, NarTop10Accuracy=0.5986, over 5797.00 frames. ], tot_loss[loss=3.688, NarTop10Accuracy=0.5787, over 6049.10 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,791 INFO [trainer.py:765] (1/8) Epoch 8, batch 2400, train_loss[loss=3.76, NarTop10Accuracy=0.5615, over 5810.00 frames. ], tot_loss[loss=3.696, NarTop10Accuracy=0.5774, over 5851.14 frames. ], batch size: 48, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (1/8) Epoch 8, batch 2500, train_loss[loss=3.452, NarTop10Accuracy=0.6305, over 5112.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5808, over 5518.30 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,348 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:32:19,099 INFO [trainer.py:765] (1/8) Epoch 9, batch 100, train_loss[loss=3.783, NarTop10Accuracy=0.5637, over 7263.00 frames. ], tot_loss[loss=3.611, NarTop10Accuracy=0.596, over 2374.54 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (1/8) Epoch 9, batch 200, train_loss[loss=3.501, NarTop10Accuracy=0.6108, over 6791.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5989, over 3879.07 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (1/8) Epoch 9, batch 300, train_loss[loss=3.821, NarTop10Accuracy=0.5558, over 7093.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5989, over 4683.25 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,964 INFO [trainer.py:765] (1/8) Epoch 9, batch 400, train_loss[loss=3.482, NarTop10Accuracy=0.6196, over 5113.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5995, over 5133.50 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (1/8) Epoch 9, batch 500, train_loss[loss=3.76, NarTop10Accuracy=0.5704, over 6224.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6017, over 5400.48 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,497 INFO [trainer.py:765] (1/8) Epoch 9, batch 600, train_loss[loss=3.398, NarTop10Accuracy=0.6427, over 5809.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.601, over 5667.94 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (1/8) Epoch 9, batch 700, train_loss[loss=3.776, NarTop10Accuracy=0.5656, over 4973.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5995, over 5743.66 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (1/8) Epoch 9, batch 800, train_loss[loss=3.249, NarTop10Accuracy=0.6659, over 4996.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5959, over 5803.23 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,454 INFO [trainer.py:765] (1/8) Epoch 9, batch 900, train_loss[loss=3.259, NarTop10Accuracy=0.6664, over 6682.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5952, over 5827.55 frames. ], batch size: 14, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (1/8) Epoch 9, batch 1000, train_loss[loss=3.552, NarTop10Accuracy=0.6071, over 6354.00 frames. ], tot_loss[loss=3.617, NarTop10Accuracy=0.5935, over 5928.68 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,421 INFO [trainer.py:765] (1/8) Epoch 9, batch 1100, train_loss[loss=3.94, NarTop10Accuracy=0.5256, over 6941.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5907, over 5951.85 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (1/8) Epoch 9, batch 1200, train_loss[loss=3.747, NarTop10Accuracy=0.5729, over 6999.00 frames. ], tot_loss[loss=3.63, NarTop10Accuracy=0.5905, over 5945.08 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (1/8) Epoch 9, batch 1300, train_loss[loss=3.574, NarTop10Accuracy=0.6046, over 4992.00 frames. ], tot_loss[loss=3.63, NarTop10Accuracy=0.5904, over 6020.09 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,117 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:39:38,197 INFO [trainer.py:811] (1/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (1/8) Epoch 9, batch 1400, train_loss[loss=3.532, NarTop10Accuracy=0.6133, over 6147.00 frames. ], tot_loss[loss=3.625, NarTop10Accuracy=0.592, over 6036.07 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,331 INFO [trainer.py:765] (1/8) Epoch 9, batch 1500, train_loss[loss=3.963, NarTop10Accuracy=0.5218, over 6355.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5899, over 5982.08 frames. ], batch size: 51, lr: 9.41e-03 +2024-08-06 08:40:50,367 INFO [trainer.py:765] (1/8) Epoch 9, batch 1600, train_loss[loss=3.814, NarTop10Accuracy=0.5483, over 7034.00 frames. ], tot_loss[loss=3.636, NarTop10Accuracy=0.5897, over 5964.21 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,151 INFO [trainer.py:765] (1/8) Epoch 9, batch 1700, train_loss[loss=3.78, NarTop10Accuracy=0.5632, over 6251.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.5881, over 5951.50 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (1/8) Epoch 9, batch 1800, train_loss[loss=3.92, NarTop10Accuracy=0.5377, over 7211.00 frames. ], tot_loss[loss=3.628, NarTop10Accuracy=0.5908, over 6026.49 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,495 INFO [trainer.py:765] (1/8) Epoch 9, batch 1900, train_loss[loss=3.655, NarTop10Accuracy=0.5955, over 6257.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.59, over 6045.55 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (1/8) Epoch 9, batch 2000, train_loss[loss=3.801, NarTop10Accuracy=0.5613, over 6116.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5886, over 6020.27 frames. ], batch size: 49, lr: 9.31e-03 +2024-08-06 08:43:01,667 INFO [trainer.py:765] (1/8) Epoch 9, batch 2100, train_loss[loss=3.607, NarTop10Accuracy=0.591, over 3951.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.5893, over 6001.08 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (1/8) Epoch 9, batch 2200, train_loss[loss=3.626, NarTop10Accuracy=0.5965, over 7371.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5894, over 6041.21 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (1/8) Epoch 9, batch 2300, train_loss[loss=3.548, NarTop10Accuracy=0.5889, over 5864.00 frames. ], tot_loss[loss=3.654, NarTop10Accuracy=0.5851, over 6058.76 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,549 INFO [trainer.py:765] (1/8) Epoch 9, batch 2400, train_loss[loss=3.735, NarTop10Accuracy=0.5749, over 5879.00 frames. ], tot_loss[loss=3.647, NarTop10Accuracy=0.5867, over 5903.57 frames. ], batch size: 48, lr: 9.24e-03 +2024-08-06 08:44:44,001 INFO [trainer.py:765] (1/8) Epoch 9, batch 2500, train_loss[loss=3.708, NarTop10Accuracy=0.5791, over 5016.00 frames. ], tot_loss[loss=3.63, NarTop10Accuracy=0.5905, over 5563.30 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:04,912 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 08:46:09,063 INFO [trainer.py:765] (1/8) Epoch 10, batch 100, train_loss[loss=3.433, NarTop10Accuracy=0.6349, over 7337.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5982, over 2375.26 frames. ], batch size: 30, lr: 8.75e-03 +2024-08-06 08:46:44,073 INFO [trainer.py:765] (1/8) Epoch 10, batch 200, train_loss[loss=3.431, NarTop10Accuracy=0.6365, over 6837.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.604, over 3862.32 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,442 INFO [trainer.py:765] (1/8) Epoch 10, batch 300, train_loss[loss=3.531, NarTop10Accuracy=0.5996, over 7228.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.605, over 4675.91 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,120 INFO [trainer.py:765] (1/8) Epoch 10, batch 400, train_loss[loss=3.677, NarTop10Accuracy=0.5788, over 5160.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6029, over 5121.23 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,369 INFO [trainer.py:765] (1/8) Epoch 10, batch 500, train_loss[loss=3.355, NarTop10Accuracy=0.6475, over 6086.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6038, over 5397.45 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,459 INFO [trainer.py:765] (1/8) Epoch 10, batch 600, train_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5822.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6028, over 5664.96 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (1/8) Epoch 10, batch 700, train_loss[loss=3.202, NarTop10Accuracy=0.6861, over 4934.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6005, over 5727.86 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,165 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (1/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 08:50:01,724 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (1/8) Epoch 10, batch 800, train_loss[loss=3.656, NarTop10Accuracy=0.593, over 5129.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6006, over 5795.32 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,890 INFO [trainer.py:765] (1/8) Epoch 10, batch 900, train_loss[loss=3.315, NarTop10Accuracy=0.6606, over 6197.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6024, over 5826.76 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (1/8) Epoch 10, batch 1000, train_loss[loss=3.88, NarTop10Accuracy=0.5443, over 6227.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5994, over 5919.76 frames. ], batch size: 13, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (1/8) Epoch 10, batch 1100, train_loss[loss=3.483, NarTop10Accuracy=0.6311, over 7021.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5964, over 5946.18 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (1/8) Epoch 10, batch 1200, train_loss[loss=3.555, NarTop10Accuracy=0.6193, over 7389.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5967, over 5951.03 frames. ], batch size: 30, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (1/8) Epoch 10, batch 1300, train_loss[loss=3.621, NarTop10Accuracy=0.596, over 5147.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5978, over 6011.47 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,880 INFO [trainer.py:765] (1/8) Epoch 10, batch 1400, train_loss[loss=3.631, NarTop10Accuracy=0.5968, over 6185.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.596, over 6034.53 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (1/8) Epoch 10, batch 1500, train_loss[loss=3.618, NarTop10Accuracy=0.5999, over 5995.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.5999, over 5989.93 frames. ], batch size: 49, lr: 8.51e-03 +2024-08-06 08:54:45,525 INFO [trainer.py:765] (1/8) Epoch 10, batch 1600, train_loss[loss=3.506, NarTop10Accuracy=0.6191, over 7185.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5966, over 5962.96 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,299 INFO [trainer.py:765] (1/8) Epoch 10, batch 1700, train_loss[loss=3.344, NarTop10Accuracy=0.6426, over 6343.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5962, over 5948.11 frames. 
], batch size: 13, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (1/8) Epoch 10, batch 1800, train_loss[loss=3.556, NarTop10Accuracy=0.61, over 7239.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5961, over 6019.11 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (1/8) Epoch 10, batch 1900, train_loss[loss=3.993, NarTop10Accuracy=0.5155, over 6288.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5958, over 6055.65 frames. ], batch size: 48, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (1/8) Epoch 10, batch 2000, train_loss[loss=3.685, NarTop10Accuracy=0.5739, over 6309.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5954, over 6025.70 frames. ], batch size: 49, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (1/8) Epoch 10, batch 2100, train_loss[loss=3.635, NarTop10Accuracy=0.593, over 4766.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5957, over 6013.90 frames. ], batch size: 5, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (1/8) Epoch 10, batch 2200, train_loss[loss=3.776, NarTop10Accuracy=0.5636, over 7148.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5955, over 6042.29 frames. ], batch size: 30, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (1/8) Epoch 10, batch 2300, train_loss[loss=3.454, NarTop10Accuracy=0.6384, over 5881.00 frames. ], tot_loss[loss=3.618, NarTop10Accuracy=0.5929, over 6056.82 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,344 INFO [trainer.py:765] (1/8) Epoch 10, batch 2400, train_loss[loss=3.497, NarTop10Accuracy=0.6096, over 5258.00 frames. ], tot_loss[loss=3.616, NarTop10Accuracy=0.5937, over 5874.37 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,809 INFO [trainer.py:765] (1/8) Epoch 10, batch 2500, train_loss[loss=3.249, NarTop10Accuracy=0.6497, over 5130.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5982, over 5533.91 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:58:59,809 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:00:03,681 INFO [trainer.py:765] (1/8) Epoch 11, batch 100, train_loss[loss=3.457, NarTop10Accuracy=0.6259, over 7301.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6123, over 2359.10 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (1/8) Epoch 11, batch 200, train_loss[loss=3.825, NarTop10Accuracy=0.5553, over 6919.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6118, over 3861.58 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (1/8) Epoch 11, batch 300, train_loss[loss=3.591, NarTop10Accuracy=0.5953, over 7217.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6105, over 4677.83 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,533 INFO [trainer.py:765] (1/8) Epoch 11, batch 400, train_loss[loss=3.295, NarTop10Accuracy=0.6555, over 5125.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.611, over 5120.94 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,238 INFO [trainer.py:765] (1/8) Epoch 11, batch 500, train_loss[loss=3.482, NarTop10Accuracy=0.622, over 6011.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6105, over 5410.24 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,741 INFO [trainer.py:765] (1/8) Epoch 11, batch 600, train_loss[loss=3.373, NarTop10Accuracy=0.6372, over 5787.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6095, over 5688.62 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,236 INFO [trainer.py:765] (1/8) Epoch 11, batch 700, train_loss[loss=3.414, NarTop10Accuracy=0.6345, over 5074.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6087, over 5746.82 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,755 INFO [trainer.py:765] (1/8) Epoch 11, batch 800, train_loss[loss=3.679, NarTop10Accuracy=0.5939, over 4992.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6051, over 5806.18 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,082 INFO [trainer.py:765] (1/8) Epoch 11, batch 900, train_loss[loss=3.44, NarTop10Accuracy=0.6319, over 6232.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6082, over 5830.76 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 09:05:27,011 INFO [trainer.py:765] (1/8) Epoch 11, batch 1000, train_loss[loss=3.389, NarTop10Accuracy=0.6354, over 6310.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6087, over 5936.03 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,349 INFO [trainer.py:765] (1/8) Epoch 11, batch 1100, train_loss[loss=3.604, NarTop10Accuracy=0.6068, over 6816.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.605, over 5953.47 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,945 INFO [trainer.py:765] (1/8) Epoch 11, batch 1200, train_loss[loss=3.652, NarTop10Accuracy=0.5893, over 7503.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6038, over 5957.13 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,493 INFO [trainer.py:765] (1/8) Epoch 11, batch 1300, train_loss[loss=3.476, NarTop10Accuracy=0.6312, over 5133.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6016, over 6025.16 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,628 INFO [trainer.py:765] (1/8) Epoch 11, batch 1400, train_loss[loss=3.448, NarTop10Accuracy=0.6225, over 6134.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.5998, over 6036.20 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,986 INFO [trainer.py:765] (1/8) Epoch 11, batch 1500, train_loss[loss=3.604, NarTop10Accuracy=0.6005, over 5806.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5989, over 5973.86 frames. ], batch size: 49, lr: 7.76e-03 +2024-08-06 09:08:47,148 INFO [trainer.py:765] (1/8) Epoch 11, batch 1600, train_loss[loss=3.419, NarTop10Accuracy=0.6405, over 7031.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.599, over 5965.66 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,949 INFO [trainer.py:765] (1/8) Epoch 11, batch 1700, train_loss[loss=3.455, NarTop10Accuracy=0.6191, over 6304.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6002, over 5939.79 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,731 INFO [trainer.py:765] (1/8) Epoch 11, batch 1800, train_loss[loss=3.497, NarTop10Accuracy=0.6055, over 6902.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.5982, over 5999.80 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,341 INFO [trainer.py:765] (1/8) Epoch 11, batch 1900, train_loss[loss=3.759, NarTop10Accuracy=0.5719, over 6534.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5966, over 6028.41 frames. ], batch size: 48, lr: 7.70e-03 +2024-08-06 09:10:33,038 INFO [trainer.py:765] (1/8) Epoch 11, batch 2000, train_loss[loss=3.633, NarTop10Accuracy=0.5896, over 5739.00 frames. ], tot_loss[loss=3.597, NarTop10Accuracy=0.597, over 6019.53 frames. ], batch size: 48, lr: 7.69e-03 +2024-08-06 09:10:58,440 INFO [trainer.py:765] (1/8) Epoch 11, batch 2100, train_loss[loss=3.145, NarTop10Accuracy=0.675, over 3977.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6007, over 5986.24 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (1/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (1/8) Epoch 11, batch 2200, train_loss[loss=3.545, NarTop10Accuracy=0.6097, over 7112.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6, over 6019.35 frames. ], batch size: 30, lr: 7.66e-03 +2024-08-06 09:11:59,940 INFO [trainer.py:765] (1/8) Epoch 11, batch 2300, train_loss[loss=3.587, NarTop10Accuracy=0.5965, over 5891.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5985, over 6061.59 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,696 INFO [trainer.py:765] (1/8) Epoch 11, batch 2400, train_loss[loss=3.913, NarTop10Accuracy=0.5394, over 6056.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5954, over 5889.79 frames. ], batch size: 51, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (1/8) Epoch 11, batch 2500, train_loss[loss=3.569, NarTop10Accuracy=0.596, over 5115.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6005, over 5530.31 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,242 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:14:12,279 INFO [trainer.py:765] (1/8) Epoch 12, batch 100, train_loss[loss=3.4, NarTop10Accuracy=0.6417, over 7147.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6112, over 2364.65 frames. ], batch size: 30, lr: 7.29e-03 +2024-08-06 09:14:48,096 INFO [trainer.py:765] (1/8) Epoch 12, batch 200, train_loss[loss=3.461, NarTop10Accuracy=0.6259, over 6858.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6155, over 3853.84 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (1/8) Epoch 12, batch 300, train_loss[loss=3.38, NarTop10Accuracy=0.6343, over 7112.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6179, over 4651.25 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,634 INFO [trainer.py:765] (1/8) Epoch 12, batch 400, train_loss[loss=3.473, NarTop10Accuracy=0.6313, over 5123.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6138, over 5116.28 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (1/8) Epoch 12, batch 500, train_loss[loss=3.634, NarTop10Accuracy=0.5974, over 6078.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6134, over 5384.23 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (1/8) Epoch 12, batch 600, train_loss[loss=3.267, NarTop10Accuracy=0.6511, over 5765.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6108, over 5655.65 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,318 INFO [trainer.py:765] (1/8) Epoch 12, batch 700, train_loss[loss=3.581, NarTop10Accuracy=0.598, over 5134.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6124, over 5729.81 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,753 INFO [trainer.py:765] (1/8) Epoch 12, batch 800, train_loss[loss=3.543, NarTop10Accuracy=0.597, over 4913.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.613, over 5792.62 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (1/8) Epoch 12, batch 900, train_loss[loss=3.745, NarTop10Accuracy=0.5549, over 6241.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6099, over 5803.74 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (1/8) Epoch 12, batch 1000, train_loss[loss=3.693, NarTop10Accuracy=0.5841, over 6246.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6097, over 5922.97 frames. ], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,427 INFO [trainer.py:765] (1/8) Epoch 12, batch 1100, train_loss[loss=3.501, NarTop10Accuracy=0.6205, over 6883.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6095, over 5962.38 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,443 INFO [trainer.py:765] (1/8) Epoch 12, batch 1200, train_loss[loss=3.432, NarTop10Accuracy=0.6273, over 7494.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6076, over 5955.79 frames. ], batch size: 31, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (1/8) Epoch 12, batch 1300, train_loss[loss=3.683, NarTop10Accuracy=0.591, over 5132.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6076, over 6021.71 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (1/8) Epoch 12, batch 1400, train_loss[loss=3.335, NarTop10Accuracy=0.6382, over 5965.00 frames. ], tot_loss[loss=3.553, NarTop10Accuracy=0.6061, over 6034.25 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,920 INFO [trainer.py:765] (1/8) Epoch 12, batch 1500, train_loss[loss=3.619, NarTop10Accuracy=0.5926, over 5899.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6059, over 5965.54 frames. ], batch size: 49, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (1/8) Epoch 12, batch 1600, train_loss[loss=3.656, NarTop10Accuracy=0.5866, over 7213.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6045, over 5956.03 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,859 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (1/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,786 INFO [trainer.py:765] (1/8) Epoch 12, batch 1700, train_loss[loss=3.503, NarTop10Accuracy=0.6158, over 6808.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6042, over 5942.23 frames. 
], batch size: 14, lr: 7.10e-03 +2024-08-06 09:23:41,386 INFO [trainer.py:765] (1/8) Epoch 12, batch 1800, train_loss[loss=3.354, NarTop10Accuracy=0.6622, over 7136.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6059, over 6011.58 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (1/8) Epoch 12, batch 1900, train_loss[loss=3.558, NarTop10Accuracy=0.6031, over 5980.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6047, over 6054.02 frames. ], batch size: 49, lr: 7.08e-03 +2024-08-06 09:24:33,619 INFO [trainer.py:765] (1/8) Epoch 12, batch 2000, train_loss[loss=3.61, NarTop10Accuracy=0.5958, over 6079.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6035, over 6027.07 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (1/8) Epoch 12, batch 2100, train_loss[loss=3.773, NarTop10Accuracy=0.5631, over 4803.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6033, over 6021.49 frames. ], batch size: 5, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (1/8) Epoch 12, batch 2200, train_loss[loss=3.422, NarTop10Accuracy=0.6288, over 7329.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6027, over 6056.22 frames. ], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,927 INFO [trainer.py:765] (1/8) Epoch 12, batch 2300, train_loss[loss=3.713, NarTop10Accuracy=0.5691, over 5601.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6012, over 6071.65 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (1/8) Epoch 12, batch 2400, train_loss[loss=3.454, NarTop10Accuracy=0.6311, over 5248.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6014, over 5890.08 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,155 INFO [trainer.py:765] (1/8) Epoch 12, batch 2500, train_loss[loss=3.55, NarTop10Accuracy=0.5992, over 4968.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6054, over 5547.67 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,493 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (1/8) Epoch 13, batch 100, train_loss[loss=3.518, NarTop10Accuracy=0.6167, over 7129.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6144, over 2364.61 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,906 INFO [trainer.py:765] (1/8) Epoch 13, batch 200, train_loss[loss=3.287, NarTop10Accuracy=0.6615, over 6979.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6159, over 3862.11 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (1/8) Epoch 13, batch 300, train_loss[loss=3.313, NarTop10Accuracy=0.6539, over 7028.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6181, over 4661.41 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,039 INFO [trainer.py:765] (1/8) Epoch 13, batch 400, train_loss[loss=3.391, NarTop10Accuracy=0.6409, over 5704.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6209, over 5110.53 frames. ], batch size: 8, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (1/8) Epoch 13, batch 500, train_loss[loss=3.801, NarTop10Accuracy=0.5469, over 6153.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6214, over 5383.58 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (1/8) Epoch 13, batch 600, train_loss[loss=3.417, NarTop10Accuracy=0.6498, over 5824.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6203, over 5662.41 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (1/8) Epoch 13, batch 700, train_loss[loss=3.496, NarTop10Accuracy=0.6243, over 4275.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6167, over 5740.82 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (1/8) Epoch 13, batch 800, train_loss[loss=3.689, NarTop10Accuracy=0.5734, over 5051.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6157, over 5807.44 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (1/8) Epoch 13, batch 900, train_loss[loss=3.406, NarTop10Accuracy=0.6374, over 6417.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6166, over 5817.60 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (1/8) Epoch 13, batch 1000, train_loss[loss=3.497, NarTop10Accuracy=0.6168, over 6829.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6168, over 5921.44 frames. ], batch size: 14, lr: 6.63e-03 +2024-08-06 09:33:14,219 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (1/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,525 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (1/8) Epoch 13, batch 1100, train_loss[loss=3.597, NarTop10Accuracy=0.604, over 6848.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6117, over 5947.74 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (1/8) Epoch 13, batch 1200, train_loss[loss=3.663, NarTop10Accuracy=0.5839, over 7203.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.613, over 5958.10 frames. ], batch size: 30, lr: 6.61e-03 +2024-08-06 09:35:05,085 INFO [trainer.py:765] (1/8) Epoch 13, batch 1300, train_loss[loss=3.49, NarTop10Accuracy=0.6167, over 5039.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6132, over 6012.41 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,404 INFO [trainer.py:765] (1/8) Epoch 13, batch 1400, train_loss[loss=3.442, NarTop10Accuracy=0.627, over 6242.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6099, over 6038.05 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (1/8) Epoch 13, batch 1500, train_loss[loss=3.668, NarTop10Accuracy=0.5843, over 6087.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6088, over 5971.40 frames. ], batch size: 49, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (1/8) Epoch 13, batch 1600, train_loss[loss=3.663, NarTop10Accuracy=0.5846, over 7288.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6088, over 5966.93 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (1/8) Epoch 13, batch 1700, train_loss[loss=3.531, NarTop10Accuracy=0.6144, over 6324.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6096, over 5938.59 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (1/8) Epoch 13, batch 1800, train_loss[loss=3.466, NarTop10Accuracy=0.6239, over 7300.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6114, over 5999.74 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (1/8) Epoch 13, batch 1900, train_loss[loss=3.515, NarTop10Accuracy=0.614, over 5859.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6083, over 6031.79 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 09:38:21,123 INFO [trainer.py:765] (1/8) Epoch 13, batch 2000, train_loss[loss=3.54, NarTop10Accuracy=0.6021, over 5429.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6085, over 5998.24 frames. ], batch size: 49, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (1/8) Epoch 13, batch 2100, train_loss[loss=3.38, NarTop10Accuracy=0.65, over 3874.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6086, over 5981.51 frames. ], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (1/8) Epoch 13, batch 2200, train_loss[loss=3.619, NarTop10Accuracy=0.5941, over 6903.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6075, over 6009.30 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (1/8) Epoch 13, batch 2300, train_loss[loss=3.482, NarTop10Accuracy=0.6158, over 5696.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6079, over 6051.56 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (1/8) Epoch 13, batch 2400, train_loss[loss=3.311, NarTop10Accuracy=0.6553, over 5292.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.606, over 5864.33 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (1/8) Epoch 13, batch 2500, train_loss[loss=3.33, NarTop10Accuracy=0.6604, over 4220.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6091, over 5536.44 frames. ], batch size: 5, lr: 6.48e-03 +2024-08-06 09:40:50,382 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (1/8) Epoch 14, batch 100, train_loss[loss=3.42, NarTop10Accuracy=0.6413, over 6956.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6254, over 2376.19 frames. ], batch size: 30, lr: 6.24e-03 +2024-08-06 09:42:22,938 INFO [trainer.py:765] (1/8) Epoch 14, batch 200, train_loss[loss=3.758, NarTop10Accuracy=0.5647, over 6979.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6261, over 3874.03 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (1/8) Epoch 14, batch 300, train_loss[loss=3.64, NarTop10Accuracy=0.582, over 7098.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6202, over 4690.39 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (1/8) Epoch 14, batch 400, train_loss[loss=3.335, NarTop10Accuracy=0.6444, over 5121.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6205, over 5133.91 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (1/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,652 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:43:54,212 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (1/8) Epoch 14, batch 500, train_loss[loss=3.356, NarTop10Accuracy=0.6567, over 6256.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6211, over 5430.65 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (1/8) Epoch 14, batch 600, train_loss[loss=3.876, NarTop10Accuracy=0.5351, over 5897.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.623, over 5680.10 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,803 INFO [trainer.py:765] (1/8) Epoch 14, batch 700, train_loss[loss=3.741, NarTop10Accuracy=0.5736, over 5161.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.623, over 5755.15 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (1/8) Epoch 14, batch 800, train_loss[loss=3.282, NarTop10Accuracy=0.6561, over 4985.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6199, over 5791.76 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (1/8) Epoch 14, batch 900, train_loss[loss=3.691, NarTop10Accuracy=0.5768, over 6665.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6193, over 5838.34 frames. ], batch size: 14, lr: 6.17e-03 +2024-08-06 09:47:08,400 INFO [trainer.py:765] (1/8) Epoch 14, batch 1000, train_loss[loss=3.474, NarTop10Accuracy=0.618, over 6192.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6195, over 5936.96 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (1/8) Epoch 14, batch 1100, train_loss[loss=3.499, NarTop10Accuracy=0.6206, over 6964.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6183, over 5969.26 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (1/8) Epoch 14, batch 1200, train_loss[loss=3.365, NarTop10Accuracy=0.6419, over 7419.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6183, over 5966.05 frames. ], batch size: 31, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (1/8) Epoch 14, batch 1300, train_loss[loss=3.384, NarTop10Accuracy=0.6521, over 4917.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6181, over 6017.36 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (1/8) Epoch 14, batch 1400, train_loss[loss=3.346, NarTop10Accuracy=0.6378, over 6236.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6139, over 6041.65 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (1/8) Epoch 14, batch 1500, train_loss[loss=3.593, NarTop10Accuracy=0.5987, over 6034.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6122, over 5978.59 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (1/8) Epoch 14, batch 1600, train_loss[loss=3.408, NarTop10Accuracy=0.633, over 7194.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6131, over 5966.05 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,378 INFO [trainer.py:765] (1/8) Epoch 14, batch 1700, train_loss[loss=3.455, NarTop10Accuracy=0.6339, over 6193.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6138, over 5955.07 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (1/8) Epoch 14, batch 1800, train_loss[loss=3.541, NarTop10Accuracy=0.6101, over 7131.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6168, over 6026.09 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (1/8) Epoch 14, batch 1900, train_loss[loss=3.809, NarTop10Accuracy=0.559, over 6496.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6143, over 6061.54 frames. 
], batch size: 48, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (1/8) Epoch 14, batch 2000, train_loss[loss=3.499, NarTop10Accuracy=0.62, over 5931.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6142, over 6031.81 frames. ], batch size: 48, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (1/8) Epoch 14, batch 2100, train_loss[loss=3.324, NarTop10Accuracy=0.6572, over 3943.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6151, over 6021.13 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (1/8) Epoch 14, batch 2200, train_loss[loss=3.496, NarTop10Accuracy=0.6236, over 7201.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.614, over 6040.77 frames. ], batch size: 31, lr: 6.05e-03 +2024-08-06 09:53:37,976 INFO [trainer.py:765] (1/8) Epoch 14, batch 2300, train_loss[loss=3.522, NarTop10Accuracy=0.6112, over 5700.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6113, over 6061.08 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (1/8) Epoch 14, batch 2400, train_loss[loss=3.773, NarTop10Accuracy=0.5647, over 5122.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6118, over 5893.70 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (1/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (1/8) Epoch 14, batch 2500, train_loss[loss=3.495, NarTop10Accuracy=0.6064, over 4965.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6161, over 5538.24 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,950 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (1/8) Epoch 15, batch 100, train_loss[loss=3.404, NarTop10Accuracy=0.6402, over 7049.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6272, over 2360.32 frames. ], batch size: 31, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (1/8) Epoch 15, batch 200, train_loss[loss=3.197, NarTop10Accuracy=0.6824, over 6944.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6276, over 3855.32 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,653 INFO [trainer.py:765] (1/8) Epoch 15, batch 300, train_loss[loss=3.226, NarTop10Accuracy=0.6768, over 7218.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6273, over 4666.13 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,464 INFO [trainer.py:765] (1/8) Epoch 15, batch 400, train_loss[loss=3.645, NarTop10Accuracy=0.5928, over 5089.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6271, over 5111.58 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (1/8) Epoch 15, batch 500, train_loss[loss=3.359, NarTop10Accuracy=0.648, over 6094.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6259, over 5386.84 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (1/8) Epoch 15, batch 600, train_loss[loss=3.489, NarTop10Accuracy=0.6183, over 5805.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6236, over 5667.01 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,062 INFO [trainer.py:765] (1/8) Epoch 15, batch 700, train_loss[loss=3.169, NarTop10Accuracy=0.6914, over 5167.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.621, over 5726.29 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (1/8) Epoch 15, batch 800, train_loss[loss=3.718, NarTop10Accuracy=0.5739, over 4981.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6209, over 5804.10 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (1/8) Epoch 15, batch 900, train_loss[loss=3.628, NarTop10Accuracy=0.5956, over 6731.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6217, over 5815.22 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 10:01:05,538 INFO [trainer.py:765] (1/8) Epoch 15, batch 1000, train_loss[loss=3.274, NarTop10Accuracy=0.6627, over 6593.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6228, over 5932.81 frames. ], batch size: 14, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (1/8) Epoch 15, batch 1100, train_loss[loss=3.284, NarTop10Accuracy=0.65, over 6799.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6186, over 5954.46 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,756 INFO [trainer.py:765] (1/8) Epoch 15, batch 1200, train_loss[loss=3.822, NarTop10Accuracy=0.5506, over 7269.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6201, over 5949.61 frames. ], batch size: 30, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (1/8) Epoch 15, batch 1300, train_loss[loss=3.193, NarTop10Accuracy=0.6569, over 4351.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6196, over 6020.30 frames. ], batch size: 5, lr: 5.72e-03 +2024-08-06 10:03:25,435 INFO [trainer.py:765] (1/8) Epoch 15, batch 1400, train_loss[loss=3.598, NarTop10Accuracy=0.5952, over 6186.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6162, over 6051.83 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,041 INFO [trainer.py:765] (1/8) Epoch 15, batch 1500, train_loss[loss=3.45, NarTop10Accuracy=0.6203, over 6175.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6183, over 5971.58 frames. ], batch size: 49, lr: 5.71e-03 +2024-08-06 10:04:27,106 INFO [trainer.py:765] (1/8) Epoch 15, batch 1600, train_loss[loss=3.795, NarTop10Accuracy=0.5601, over 7394.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6206, over 5964.28 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (1/8) Epoch 15, batch 1700, train_loss[loss=3.782, NarTop10Accuracy=0.5549, over 6321.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6187, over 5947.46 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,729 INFO [trainer.py:765] (1/8) Epoch 15, batch 1800, train_loss[loss=3.792, NarTop10Accuracy=0.559, over 7027.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6161, over 6014.90 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,266 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (1/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,569 INFO [trainer.py:765] (1/8) Epoch 15, batch 1900, train_loss[loss=3.834, NarTop10Accuracy=0.5619, over 6350.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6142, over 6048.00 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 10:06:23,372 INFO [trainer.py:765] (1/8) Epoch 15, batch 2000, train_loss[loss=3.544, NarTop10Accuracy=0.6188, over 5895.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6176, over 6011.31 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,758 INFO [trainer.py:765] (1/8) Epoch 15, batch 2100, train_loss[loss=3.291, NarTop10Accuracy=0.6667, over 3894.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6165, over 5995.84 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,171 INFO [trainer.py:765] (1/8) Epoch 15, batch 2200, train_loss[loss=3.281, NarTop10Accuracy=0.661, over 7284.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6178, over 6042.90 frames. ], batch size: 30, lr: 5.65e-03 +2024-08-06 10:07:39,629 INFO [trainer.py:765] (1/8) Epoch 15, batch 2300, train_loss[loss=3.376, NarTop10Accuracy=0.6329, over 5787.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.615, over 6069.21 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (1/8) Epoch 15, batch 2400, train_loss[loss=3.737, NarTop10Accuracy=0.5682, over 5239.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.615, over 5882.56 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,713 INFO [trainer.py:765] (1/8) Epoch 15, batch 2500, train_loss[loss=3.62, NarTop10Accuracy=0.5948, over 4958.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.615, over 5522.27 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,095 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (1/8) Epoch 16, batch 100, train_loss[loss=3.548, NarTop10Accuracy=0.6103, over 6856.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6274, over 2365.63 frames. ], batch size: 30, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (1/8) Epoch 16, batch 200, train_loss[loss=3.391, NarTop10Accuracy=0.6396, over 6771.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6294, over 3854.58 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (1/8) Epoch 16, batch 300, train_loss[loss=3.293, NarTop10Accuracy=0.6652, over 7255.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6287, over 4665.14 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,593 INFO [trainer.py:765] (1/8) Epoch 16, batch 400, train_loss[loss=3.379, NarTop10Accuracy=0.6461, over 5192.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6272, over 5117.03 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,297 INFO [trainer.py:765] (1/8) Epoch 16, batch 500, train_loss[loss=3.511, NarTop10Accuracy=0.6241, over 6110.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6275, over 5411.05 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (1/8) Epoch 16, batch 600, train_loss[loss=3.366, NarTop10Accuracy=0.6422, over 5795.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6285, over 5696.32 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,948 INFO [trainer.py:765] (1/8) Epoch 16, batch 700, train_loss[loss=3.135, NarTop10Accuracy=0.6862, over 4996.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6259, over 5754.62 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,283 INFO [trainer.py:765] (1/8) Epoch 16, batch 800, train_loss[loss=3.869, NarTop10Accuracy=0.5505, over 5279.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6268, over 5808.71 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,293 INFO [trainer.py:765] (1/8) Epoch 16, batch 900, train_loss[loss=3.292, NarTop10Accuracy=0.6439, over 6234.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6286, over 5822.89 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,057 INFO [trainer.py:765] (1/8) Epoch 16, batch 1000, train_loss[loss=3.591, NarTop10Accuracy=0.6019, over 6387.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6242, over 5919.20 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,507 INFO [trainer.py:765] (1/8) Epoch 16, batch 1100, train_loss[loss=3.328, NarTop10Accuracy=0.6468, over 7029.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6213, over 5933.42 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,382 INFO [trainer.py:765] (1/8) Epoch 16, batch 1200, train_loss[loss=3.558, NarTop10Accuracy=0.6168, over 6906.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6214, over 5933.14 frames. ], batch size: 30, lr: 5.37e-03 +2024-08-06 10:16:39,395 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (1/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,042 INFO [trainer.py:765] (1/8) Epoch 16, batch 1300, train_loss[loss=3.879, NarTop10Accuracy=0.541, over 5176.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6219, over 6002.22 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,376 INFO [trainer.py:765] (1/8) Epoch 16, batch 1400, train_loss[loss=3.312, NarTop10Accuracy=0.6487, over 6154.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6227, over 6011.92 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,354 INFO [trainer.py:765] (1/8) Epoch 16, batch 1500, train_loss[loss=3.508, NarTop10Accuracy=0.6186, over 6364.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6226, over 5971.58 frames. ], batch size: 49, lr: 5.35e-03 +2024-08-06 10:18:30,469 INFO [trainer.py:765] (1/8) Epoch 16, batch 1600, train_loss[loss=3.537, NarTop10Accuracy=0.6006, over 6965.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.62, over 5959.83 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,272 INFO [trainer.py:765] (1/8) Epoch 16, batch 1700, train_loss[loss=3.697, NarTop10Accuracy=0.5839, over 6264.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6227, over 5948.56 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,978 INFO [trainer.py:765] (1/8) Epoch 16, batch 1800, train_loss[loss=3.766, NarTop10Accuracy=0.5714, over 7109.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6207, over 6012.80 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,772 INFO [trainer.py:765] (1/8) Epoch 16, batch 1900, train_loss[loss=3.779, NarTop10Accuracy=0.5682, over 6048.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6215, over 6045.10 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:16,602 INFO [trainer.py:765] (1/8) Epoch 16, batch 2000, train_loss[loss=3.55, NarTop10Accuracy=0.6061, over 5340.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6201, over 6008.99 frames. ], batch size: 48, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (1/8) Epoch 16, batch 2100, train_loss[loss=3.806, NarTop10Accuracy=0.5579, over 3958.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6183, over 5993.76 frames. ], batch size: 4, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (1/8) Epoch 16, batch 2200, train_loss[loss=3.289, NarTop10Accuracy=0.6532, over 7207.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6193, over 6032.20 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:36,082 INFO [trainer.py:765] (1/8) Epoch 16, batch 2300, train_loss[loss=3.692, NarTop10Accuracy=0.5732, over 5769.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.617, over 6056.37 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,907 INFO [trainer.py:765] (1/8) Epoch 16, batch 2400, train_loss[loss=3.211, NarTop10Accuracy=0.6788, over 5077.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6177, over 5862.50 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,290 INFO [trainer.py:765] (1/8) Epoch 16, batch 2500, train_loss[loss=3.412, NarTop10Accuracy=0.6333, over 5070.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6236, over 5523.19 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,790 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:23:45,727 INFO [trainer.py:765] (1/8) Epoch 17, batch 100, train_loss[loss=3.515, NarTop10Accuracy=0.6085, over 7110.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6319, over 2379.03 frames. ], batch size: 30, lr: 5.12e-03 +2024-08-06 10:24:19,033 INFO [trainer.py:765] (1/8) Epoch 17, batch 200, train_loss[loss=3.296, NarTop10Accuracy=0.6637, over 6907.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6341, over 3877.26 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,441 INFO [trainer.py:765] (1/8) Epoch 17, batch 300, train_loss[loss=3.542, NarTop10Accuracy=0.6062, over 6976.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6324, over 4676.53 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (1/8) Epoch 17, batch 400, train_loss[loss=3.47, NarTop10Accuracy=0.6229, over 5196.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6313, over 5107.71 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (1/8) Epoch 17, batch 500, train_loss[loss=3.393, NarTop10Accuracy=0.6296, over 6133.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6319, over 5386.09 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,756 INFO [trainer.py:765] (1/8) Epoch 17, batch 600, train_loss[loss=3.59, NarTop10Accuracy=0.6097, over 5786.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6318, over 5653.92 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (1/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,071 INFO [trainer.py:765] (1/8) Epoch 17, batch 700, train_loss[loss=3.451, NarTop10Accuracy=0.6289, over 4985.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.627, over 5733.05 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (1/8) Epoch 17, batch 800, train_loss[loss=3.427, NarTop10Accuracy=0.6429, over 5260.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6283, over 5774.77 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,838 INFO [trainer.py:765] (1/8) Epoch 17, batch 900, train_loss[loss=3.295, NarTop10Accuracy=0.6632, over 6214.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.627, over 5812.01 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,683 INFO [trainer.py:765] (1/8) Epoch 17, batch 1000, train_loss[loss=3.36, NarTop10Accuracy=0.6571, over 6617.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6272, over 5922.64 frames. ], batch size: 14, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (1/8) Epoch 17, batch 1100, train_loss[loss=3.414, NarTop10Accuracy=0.637, over 6934.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.625, over 5967.25 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,241 INFO [trainer.py:765] (1/8) Epoch 17, batch 1200, train_loss[loss=3.57, NarTop10Accuracy=0.6065, over 7246.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6248, over 5962.68 frames. ], batch size: 30, lr: 5.05e-03 +2024-08-06 10:30:47,102 INFO [trainer.py:765] (1/8) Epoch 17, batch 1300, train_loss[loss=3.116, NarTop10Accuracy=0.6787, over 5042.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6243, over 6032.45 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,893 INFO [trainer.py:765] (1/8) Epoch 17, batch 1400, train_loss[loss=3.388, NarTop10Accuracy=0.6285, over 6066.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6231, over 6046.59 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (1/8) Epoch 17, batch 1500, train_loss[loss=3.426, NarTop10Accuracy=0.6354, over 5838.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6254, over 5960.74 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,400 INFO [trainer.py:765] (1/8) Epoch 17, batch 1600, train_loss[loss=3.482, NarTop10Accuracy=0.6153, over 7054.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6242, over 5936.66 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,394 INFO [trainer.py:765] (1/8) Epoch 17, batch 1700, train_loss[loss=3.82, NarTop10Accuracy=0.5485, over 6252.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6203, over 5915.83 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,036 INFO [trainer.py:765] (1/8) Epoch 17, batch 1800, train_loss[loss=3.716, NarTop10Accuracy=0.57, over 7041.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6199, over 5984.11 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,596 INFO [trainer.py:765] (1/8) Epoch 17, batch 1900, train_loss[loss=3.724, NarTop10Accuracy=0.5606, over 5470.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6196, over 6017.72 frames. 
], batch size: 51, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (1/8) Epoch 17, batch 2000, train_loss[loss=3.893, NarTop10Accuracy=0.5358, over 6183.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6205, over 6006.14 frames. ], batch size: 52, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (1/8) Epoch 17, batch 2100, train_loss[loss=3.209, NarTop10Accuracy=0.6614, over 3942.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6189, over 5985.91 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (1/8) Epoch 17, batch 2200, train_loss[loss=3.226, NarTop10Accuracy=0.6668, over 7200.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6231, over 6038.85 frames. ], batch size: 31, lr: 4.99e-03 +2024-08-06 10:35:25,733 INFO [trainer.py:765] (1/8) Epoch 17, batch 2300, train_loss[loss=3.377, NarTop10Accuracy=0.6413, over 5791.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6206, over 6068.41 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (1/8) Epoch 17, batch 2400, train_loss[loss=3.735, NarTop10Accuracy=0.5546, over 5302.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6198, over 5881.53 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,105 INFO [trainer.py:765] (1/8) Epoch 17, batch 2500, train_loss[loss=3.8, NarTop10Accuracy=0.5577, over 5105.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6225, over 5537.33 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,793 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:37:32,051 INFO [trainer.py:765] (1/8) Epoch 18, batch 100, train_loss[loss=3.237, NarTop10Accuracy=0.6708, over 7162.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6353, over 2362.58 frames. ], batch size: 30, lr: 4.83e-03 +2024-08-06 10:37:39,163 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,085 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 10:37:49,685 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (1/8) Epoch 18, batch 200, train_loss[loss=3.486, NarTop10Accuracy=0.6233, over 6986.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6332, over 3856.60 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,198 INFO [trainer.py:765] (1/8) Epoch 18, batch 300, train_loss[loss=3.582, NarTop10Accuracy=0.6083, over 7126.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6338, over 4669.24 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,744 INFO [trainer.py:765] (1/8) Epoch 18, batch 400, train_loss[loss=3.543, NarTop10Accuracy=0.6088, over 5206.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6334, over 5125.29 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (1/8) Epoch 18, batch 500, train_loss[loss=3.286, NarTop10Accuracy=0.6529, over 6228.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6346, over 5413.21 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (1/8) Epoch 18, batch 600, train_loss[loss=3.407, NarTop10Accuracy=0.6441, over 5772.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6342, over 5676.60 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (1/8) Epoch 18, batch 700, train_loss[loss=3.335, NarTop10Accuracy=0.6526, over 5256.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6308, over 5761.17 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (1/8) Epoch 18, batch 800, train_loss[loss=3.602, NarTop10Accuracy=0.6104, over 5246.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6307, over 5804.95 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (1/8) Epoch 18, batch 900, train_loss[loss=3.54, NarTop10Accuracy=0.6112, over 6341.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6301, over 5818.26 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,702 INFO [trainer.py:765] (1/8) Epoch 18, batch 1000, train_loss[loss=3.354, NarTop10Accuracy=0.6489, over 6203.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6296, over 5921.04 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (1/8) Epoch 18, batch 1100, train_loss[loss=3.538, NarTop10Accuracy=0.5988, over 6436.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.625, over 5939.10 frames. ], batch size: 16, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (1/8) Epoch 18, batch 1200, train_loss[loss=3.45, NarTop10Accuracy=0.632, over 7186.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.626, over 5942.50 frames. ], batch size: 30, lr: 4.77e-03 +2024-08-06 10:44:35,920 INFO [trainer.py:765] (1/8) Epoch 18, batch 1300, train_loss[loss=3.399, NarTop10Accuracy=0.6539, over 5063.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6277, over 6003.77 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,238 INFO [trainer.py:765] (1/8) Epoch 18, batch 1400, train_loss[loss=3.42, NarTop10Accuracy=0.6276, over 6241.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6267, over 6033.64 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,976 INFO [trainer.py:765] (1/8) Epoch 18, batch 1500, train_loss[loss=3.816, NarTop10Accuracy=0.5579, over 5828.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6259, over 5966.95 frames. ], batch size: 48, lr: 4.75e-03 +2024-08-06 10:46:09,056 INFO [trainer.py:765] (1/8) Epoch 18, batch 1600, train_loss[loss=3.292, NarTop10Accuracy=0.6622, over 7038.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6238, over 5962.28 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,859 INFO [trainer.py:765] (1/8) Epoch 18, batch 1700, train_loss[loss=3.737, NarTop10Accuracy=0.561, over 6244.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6253, over 5957.36 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (1/8) Epoch 18, batch 1800, train_loss[loss=3.397, NarTop10Accuracy=0.6365, over 7361.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6246, over 6020.01 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (1/8) Epoch 18, batch 1900, train_loss[loss=3.73, NarTop10Accuracy=0.5701, over 6250.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6228, over 6049.11 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (1/8) Epoch 18, batch 2000, train_loss[loss=3.435, NarTop10Accuracy=0.627, over 6031.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6222, over 6010.43 frames. 
], batch size: 48, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (1/8) Epoch 18, batch 2100, train_loss[loss=3.259, NarTop10Accuracy=0.6642, over 3967.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6228, over 5998.74 frames. ], batch size: 4, lr: 4.72e-03 +2024-08-06 10:48:24,747 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (1/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30083MB +2024-08-06 10:48:35,534 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,096 INFO [trainer.py:765] (1/8) Epoch 18, batch 2200, train_loss[loss=3.382, NarTop10Accuracy=0.6486, over 7136.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6233, over 6027.10 frames. ], batch size: 30, lr: 4.72e-03 +2024-08-06 10:49:21,521 INFO [trainer.py:765] (1/8) Epoch 18, batch 2300, train_loss[loss=3.323, NarTop10Accuracy=0.6545, over 5877.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6233, over 6055.61 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (1/8) Epoch 18, batch 2400, train_loss[loss=3.189, NarTop10Accuracy=0.6671, over 5300.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6227, over 5877.44 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,708 INFO [trainer.py:765] (1/8) Epoch 18, batch 2500, train_loss[loss=3.301, NarTop10Accuracy=0.6661, over 5109.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6254, over 5541.51 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:30,782 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (1/8) Epoch 19, batch 100, train_loss[loss=3.337, NarTop10Accuracy=0.6588, over 7440.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6371, over 2353.29 frames. ], batch size: 31, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (1/8) Epoch 19, batch 200, train_loss[loss=3.647, NarTop10Accuracy=0.5901, over 6646.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6375, over 3861.58 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (1/8) Epoch 19, batch 300, train_loss[loss=3.551, NarTop10Accuracy=0.599, over 7018.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6377, over 4676.36 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,830 INFO [trainer.py:765] (1/8) Epoch 19, batch 400, train_loss[loss=3.013, NarTop10Accuracy=0.7106, over 5222.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6348, over 5125.02 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (1/8) Epoch 19, batch 500, train_loss[loss=3.278, NarTop10Accuracy=0.6469, over 6055.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.637, over 5408.91 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,601 INFO [trainer.py:765] (1/8) Epoch 19, batch 600, train_loss[loss=3.122, NarTop10Accuracy=0.6904, over 5797.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.637, over 5688.97 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (1/8) Epoch 19, batch 700, train_loss[loss=3.433, NarTop10Accuracy=0.6184, over 5210.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6345, over 5750.36 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (1/8) Epoch 19, batch 800, train_loss[loss=3.539, NarTop10Accuracy=0.6179, over 4294.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6329, over 5799.41 frames. ], batch size: 5, lr: 4.53e-03 +2024-08-06 10:56:02,238 INFO [trainer.py:765] (1/8) Epoch 19, batch 900, train_loss[loss=3.562, NarTop10Accuracy=0.6176, over 6137.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6326, over 5812.35 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 10:56:38,300 INFO [trainer.py:765] (1/8) Epoch 19, batch 1000, train_loss[loss=3.297, NarTop10Accuracy=0.6538, over 6213.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6304, over 5926.42 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,187 INFO [trainer.py:765] (1/8) Epoch 19, batch 1100, train_loss[loss=3.268, NarTop10Accuracy=0.6657, over 6916.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6284, over 5954.30 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (1/8) Epoch 19, batch 1200, train_loss[loss=3.346, NarTop10Accuracy=0.6566, over 6997.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6287, over 5934.09 frames. ], batch size: 30, lr: 4.51e-03 +2024-08-06 10:58:23,901 INFO [trainer.py:765] (1/8) Epoch 19, batch 1300, train_loss[loss=3.464, NarTop10Accuracy=0.6305, over 4966.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6288, over 6004.29 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (1/8) Epoch 19, batch 1400, train_loss[loss=3.352, NarTop10Accuracy=0.6449, over 6117.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6277, over 6017.98 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (1/8) Epoch 19, batch 1500, train_loss[loss=3.691, NarTop10Accuracy=0.5762, over 5718.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6291, over 5977.04 frames. ], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,831 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (1/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (1/8) Epoch 19, batch 1600, train_loss[loss=3.632, NarTop10Accuracy=0.5814, over 7086.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6287, over 5953.21 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (1/8) Epoch 19, batch 1700, train_loss[loss=3.645, NarTop10Accuracy=0.5867, over 6256.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.625, over 5949.83 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,257 INFO [trainer.py:765] (1/8) Epoch 19, batch 1800, train_loss[loss=3.391, NarTop10Accuracy=0.6437, over 7107.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.627, over 6009.03 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (1/8) Epoch 19, batch 1900, train_loss[loss=3.587, NarTop10Accuracy=0.5935, over 5837.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6224, over 6058.36 frames. 
], batch size: 50, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (1/8) Epoch 19, batch 2000, train_loss[loss=3.559, NarTop10Accuracy=0.6018, over 5758.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6249, over 6037.36 frames. ], batch size: 48, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (1/8) Epoch 19, batch 2100, train_loss[loss=3.333, NarTop10Accuracy=0.6576, over 4031.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6247, over 6010.37 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,695 INFO [trainer.py:765] (1/8) Epoch 19, batch 2200, train_loss[loss=3.492, NarTop10Accuracy=0.6159, over 7127.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6241, over 6033.86 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (1/8) Epoch 19, batch 2300, train_loss[loss=3.175, NarTop10Accuracy=0.6875, over 5878.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6245, over 6056.55 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,951 INFO [trainer.py:765] (1/8) Epoch 19, batch 2400, train_loss[loss=3.664, NarTop10Accuracy=0.5882, over 6723.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6224, over 5878.19 frames. ], batch size: 49, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (1/8) Epoch 19, batch 2500, train_loss[loss=3.651, NarTop10Accuracy=0.5893, over 4915.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6292, over 5535.31 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:23,889 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:05:26,561 INFO [trainer.py:765] (1/8) Epoch 20, batch 100, train_loss[loss=3.402, NarTop10Accuracy=0.6343, over 6996.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6412, over 2361.34 frames. ], batch size: 30, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (1/8) Epoch 20, batch 200, train_loss[loss=3.182, NarTop10Accuracy=0.6765, over 6873.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.643, over 3868.54 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (1/8) Epoch 20, batch 300, train_loss[loss=3.252, NarTop10Accuracy=0.672, over 6973.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6402, over 4656.76 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (1/8) Epoch 20, batch 400, train_loss[loss=3.135, NarTop10Accuracy=0.6985, over 5197.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6407, over 5115.69 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (1/8) Epoch 20, batch 500, train_loss[loss=3.353, NarTop10Accuracy=0.6451, over 6127.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6382, over 5398.09 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (1/8) Epoch 20, batch 600, train_loss[loss=3.258, NarTop10Accuracy=0.6668, over 5767.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5670.13 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,274 INFO [trainer.py:765] (1/8) Epoch 20, batch 700, train_loss[loss=3.427, NarTop10Accuracy=0.6432, over 5092.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6345, over 5731.27 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (1/8) Epoch 20, batch 800, train_loss[loss=3.318, NarTop10Accuracy=0.6545, over 5092.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6324, over 5806.26 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (1/8) Epoch 20, batch 900, train_loss[loss=3.448, NarTop10Accuracy=0.641, over 6179.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6326, over 5825.85 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,199 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (1/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,964 INFO [trainer.py:765] (1/8) Epoch 20, batch 1000, train_loss[loss=3.238, NarTop10Accuracy=0.6643, over 6211.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6336, over 5919.13 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,021 INFO [trainer.py:765] (1/8) Epoch 20, batch 1100, train_loss[loss=3.397, NarTop10Accuracy=0.6422, over 6841.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6318, over 5961.94 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,393 INFO [trainer.py:765] (1/8) Epoch 20, batch 1200, train_loss[loss=3.429, NarTop10Accuracy=0.6278, over 7460.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6295, over 5963.17 frames. ], batch size: 31, lr: 4.28e-03 +2024-08-06 11:12:30,751 INFO [trainer.py:765] (1/8) Epoch 20, batch 1300, train_loss[loss=3.906, NarTop10Accuracy=0.5406, over 5140.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 6034.74 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,291 INFO [trainer.py:765] (1/8) Epoch 20, batch 1400, train_loss[loss=3.381, NarTop10Accuracy=0.6262, over 6158.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6308, over 6063.59 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,988 INFO [trainer.py:765] (1/8) Epoch 20, batch 1500, train_loss[loss=3.555, NarTop10Accuracy=0.6135, over 6184.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6309, over 6000.87 frames. ], batch size: 49, lr: 4.27e-03 +2024-08-06 11:14:07,051 INFO [trainer.py:765] (1/8) Epoch 20, batch 1600, train_loss[loss=3.446, NarTop10Accuracy=0.616, over 7215.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6293, over 5963.72 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,909 INFO [trainer.py:765] (1/8) Epoch 20, batch 1700, train_loss[loss=3.586, NarTop10Accuracy=0.6045, over 6582.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6303, over 5933.87 frames. ], batch size: 14, lr: 4.26e-03 +2024-08-06 11:15:00,589 INFO [trainer.py:765] (1/8) Epoch 20, batch 1800, train_loss[loss=3.196, NarTop10Accuracy=0.6712, over 7195.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6284, over 5996.30 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,276 INFO [trainer.py:765] (1/8) Epoch 20, batch 1900, train_loss[loss=3.456, NarTop10Accuracy=0.6334, over 6379.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6261, over 6036.52 frames. ], batch size: 49, lr: 4.26e-03 +2024-08-06 11:15:56,437 INFO [trainer.py:765] (1/8) Epoch 20, batch 2000, train_loss[loss=3.568, NarTop10Accuracy=0.6081, over 6796.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6258, over 6027.32 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,960 INFO [trainer.py:765] (1/8) Epoch 20, batch 2100, train_loss[loss=3.402, NarTop10Accuracy=0.6304, over 3998.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6255, over 5997.31 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 11:16:47,405 INFO [trainer.py:765] (1/8) Epoch 20, batch 2200, train_loss[loss=3.441, NarTop10Accuracy=0.6316, over 7340.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6259, over 6047.74 frames. ], batch size: 33, lr: 4.24e-03 +2024-08-06 11:17:12,907 INFO [trainer.py:765] (1/8) Epoch 20, batch 2300, train_loss[loss=3.698, NarTop10Accuracy=0.5714, over 5813.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6249, over 6067.35 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,714 INFO [trainer.py:765] (1/8) Epoch 20, batch 2400, train_loss[loss=3.217, NarTop10Accuracy=0.6664, over 5121.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6254, over 5883.61 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,247 INFO [trainer.py:765] (1/8) Epoch 20, batch 2500, train_loss[loss=3.831, NarTop10Accuracy=0.5435, over 5006.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.63, over 5558.47 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,207 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:19:21,459 INFO [trainer.py:765] (1/8) Epoch 21, batch 100, train_loss[loss=3.249, NarTop10Accuracy=0.6837, over 7330.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6392, over 2370.29 frames. ], batch size: 30, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (1/8) Epoch 21, batch 200, train_loss[loss=3.537, NarTop10Accuracy=0.6134, over 6826.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6393, over 3867.20 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (1/8) Epoch 21, batch 300, train_loss[loss=3.561, NarTop10Accuracy=0.6121, over 7224.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6433, over 4670.17 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (1/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,221 INFO [trainer.py:765] (1/8) Epoch 21, batch 400, train_loss[loss=3.783, NarTop10Accuracy=0.5666, over 5103.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6406, over 5135.01 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (1/8) Epoch 21, batch 500, train_loss[loss=3.216, NarTop10Accuracy=0.6753, over 6179.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5413.78 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,237 INFO [trainer.py:765] (1/8) Epoch 21, batch 600, train_loss[loss=3.528, NarTop10Accuracy=0.611, over 5768.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6397, over 5670.72 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,842 INFO [trainer.py:765] (1/8) Epoch 21, batch 700, train_loss[loss=3.33, NarTop10Accuracy=0.6516, over 5078.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6367, over 5745.60 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,075 INFO [trainer.py:765] (1/8) Epoch 21, batch 800, train_loss[loss=3.232, NarTop10Accuracy=0.6647, over 5221.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6364, over 5807.84 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (1/8) Epoch 21, batch 900, train_loss[loss=3.623, NarTop10Accuracy=0.5858, over 6295.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 5827.06 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (1/8) Epoch 21, batch 1000, train_loss[loss=3.411, NarTop10Accuracy=0.6355, over 6774.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6333, over 5932.00 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 11:25:16,427 INFO [trainer.py:765] (1/8) Epoch 21, batch 1100, train_loss[loss=3.494, NarTop10Accuracy=0.6166, over 6897.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 5969.49 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,739 INFO [trainer.py:765] (1/8) Epoch 21, batch 1200, train_loss[loss=3.395, NarTop10Accuracy=0.6369, over 7292.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.635, over 5953.63 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,056 INFO [trainer.py:765] (1/8) Epoch 21, batch 1300, train_loss[loss=3.482, NarTop10Accuracy=0.6206, over 4998.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6358, over 6033.40 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,081 INFO [trainer.py:765] (1/8) Epoch 21, batch 1400, train_loss[loss=3.324, NarTop10Accuracy=0.6605, over 6053.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6327, over 6034.94 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (1/8) Epoch 21, batch 1500, train_loss[loss=3.718, NarTop10Accuracy=0.5747, over 6220.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6317, over 5975.49 frames. ], batch size: 49, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (1/8) Epoch 21, batch 1600, train_loss[loss=3.358, NarTop10Accuracy=0.6457, over 7105.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 5953.66 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (1/8) Epoch 21, batch 1700, train_loss[loss=3.456, NarTop10Accuracy=0.6325, over 6822.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6307, over 5924.98 frames. ], batch size: 14, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (1/8) Epoch 21, batch 1800, train_loss[loss=3.574, NarTop10Accuracy=0.5965, over 6963.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.63, over 6002.41 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (1/8) Epoch 21, batch 1900, train_loss[loss=3.522, NarTop10Accuracy=0.6126, over 6517.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6277, over 6052.94 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:29:49,029 INFO [trainer.py:765] (1/8) Epoch 21, batch 2000, train_loss[loss=3.458, NarTop10Accuracy=0.6322, over 6307.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6291, over 6025.12 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 11:30:14,528 INFO [trainer.py:765] (1/8) Epoch 21, batch 2100, train_loss[loss=3.211, NarTop10Accuracy=0.6747, over 3857.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6289, over 6004.18 frames. 
], batch size: 4, lr: 4.04e-03 +2024-08-06 11:30:39,870 INFO [trainer.py:765] (1/8) Epoch 21, batch 2200, train_loss[loss=3.548, NarTop10Accuracy=0.6088, over 7465.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6277, over 6031.48 frames. ], batch size: 31, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (1/8) Epoch 21, batch 2300, train_loss[loss=3.468, NarTop10Accuracy=0.6227, over 5773.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6267, over 6054.90 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,874 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (1/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,751 INFO [trainer.py:765] (1/8) Epoch 21, batch 2400, train_loss[loss=3.349, NarTop10Accuracy=0.655, over 5243.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6269, over 5878.80 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,055 INFO [trainer.py:765] (1/8) Epoch 21, batch 2500, train_loss[loss=3.148, NarTop10Accuracy=0.6935, over 5053.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6335, over 5527.38 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,462 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:33:29,682 INFO [trainer.py:765] (1/8) Epoch 22, batch 100, train_loss[loss=3.566, NarTop10Accuracy=0.6113, over 7093.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6462, over 2362.74 frames. ], batch size: 30, lr: 3.93e-03 +2024-08-06 11:34:05,036 INFO [trainer.py:765] (1/8) Epoch 22, batch 200, train_loss[loss=3.27, NarTop10Accuracy=0.6639, over 7019.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6448, over 3865.59 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (1/8) Epoch 22, batch 300, train_loss[loss=3.253, NarTop10Accuracy=0.6711, over 6907.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6456, over 4661.57 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (1/8) Epoch 22, batch 400, train_loss[loss=3.276, NarTop10Accuracy=0.6566, over 5291.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6418, over 5120.77 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (1/8) Epoch 22, batch 500, train_loss[loss=3.444, NarTop10Accuracy=0.6324, over 6210.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5405.26 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (1/8) Epoch 22, batch 600, train_loss[loss=3.271, NarTop10Accuracy=0.6719, over 5698.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6409, over 5681.56 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (1/8) Epoch 22, batch 700, train_loss[loss=3.283, NarTop10Accuracy=0.6577, over 5051.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6394, over 5763.01 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (1/8) Epoch 22, batch 800, train_loss[loss=3.149, NarTop10Accuracy=0.6889, over 5252.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6375, over 5831.08 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (1/8) Epoch 22, batch 900, train_loss[loss=3.254, NarTop10Accuracy=0.6709, over 6150.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6384, over 5845.46 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (1/8) Epoch 22, batch 1000, train_loss[loss=3.22, NarTop10Accuracy=0.6762, over 6424.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6375, over 5934.81 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (1/8) Epoch 22, batch 1100, train_loss[loss=3.531, NarTop10Accuracy=0.6139, over 6785.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6366, over 5958.45 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (1/8) Epoch 22, batch 1200, train_loss[loss=3.384, NarTop10Accuracy=0.6428, over 7369.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.637, over 5949.78 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (1/8) Epoch 22, batch 1300, train_loss[loss=3.454, NarTop10Accuracy=0.6256, over 5017.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6359, over 6026.76 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,609 INFO [trainer.py:765] (1/8) Epoch 22, batch 1400, train_loss[loss=3.657, NarTop10Accuracy=0.593, over 6063.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6358, over 6052.39 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,584 INFO [trainer.py:765] (1/8) Epoch 22, batch 1500, train_loss[loss=3.599, NarTop10Accuracy=0.5979, over 6009.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.634, over 5985.80 frames. ], batch size: 48, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (1/8) Epoch 22, batch 1600, train_loss[loss=3.377, NarTop10Accuracy=0.6544, over 7158.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6313, over 5957.28 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,463 INFO [trainer.py:765] (1/8) Epoch 22, batch 1700, train_loss[loss=3.535, NarTop10Accuracy=0.6186, over 6271.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6303, over 5946.76 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,723 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (1/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (1/8) Epoch 22, batch 1800, train_loss[loss=3.419, NarTop10Accuracy=0.6381, over 7085.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6313, over 6003.92 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,752 INFO [trainer.py:765] (1/8) Epoch 22, batch 1900, train_loss[loss=3.712, NarTop10Accuracy=0.5697, over 6063.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6312, over 6039.88 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (1/8) Epoch 22, batch 2000, train_loss[loss=3.762, NarTop10Accuracy=0.5607, over 5734.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6311, over 6015.74 frames. 
], batch size: 51, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (1/8) Epoch 22, batch 2100, train_loss[loss=3.189, NarTop10Accuracy=0.6845, over 4817.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.631, over 5988.07 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (1/8) Epoch 22, batch 2200, train_loss[loss=3.807, NarTop10Accuracy=0.5577, over 7113.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6314, over 6039.79 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (1/8) Epoch 22, batch 2300, train_loss[loss=3.296, NarTop10Accuracy=0.6568, over 5752.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6294, over 6053.62 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (1/8) Epoch 22, batch 2400, train_loss[loss=3.66, NarTop10Accuracy=0.5804, over 5537.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.627, over 5872.42 frames. ], batch size: 48, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (1/8) Epoch 22, batch 2500, train_loss[loss=3.262, NarTop10Accuracy=0.6472, over 5069.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6331, over 5538.06 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,605 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (1/8) Epoch 23, batch 100, train_loss[loss=3.27, NarTop10Accuracy=0.6723, over 7310.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.647, over 2368.66 frames. ], batch size: 31, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (1/8) Epoch 23, batch 200, train_loss[loss=3.452, NarTop10Accuracy=0.6302, over 6889.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6474, over 3854.34 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,921 INFO [trainer.py:765] (1/8) Epoch 23, batch 300, train_loss[loss=3.441, NarTop10Accuracy=0.6297, over 7108.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.644, over 4659.21 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,655 INFO [trainer.py:765] (1/8) Epoch 23, batch 400, train_loss[loss=3.263, NarTop10Accuracy=0.6775, over 5203.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6421, over 5109.34 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,618 INFO [trainer.py:765] (1/8) Epoch 23, batch 500, train_loss[loss=3.462, NarTop10Accuracy=0.6395, over 6198.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6418, over 5383.77 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (1/8) Epoch 23, batch 600, train_loss[loss=3.496, NarTop10Accuracy=0.6142, over 5776.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6419, over 5650.96 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,600 INFO [trainer.py:765] (1/8) Epoch 23, batch 700, train_loss[loss=3.328, NarTop10Accuracy=0.656, over 5030.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6395, over 5738.24 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (1/8) Epoch 23, batch 800, train_loss[loss=3.401, NarTop10Accuracy=0.647, over 5084.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6409, over 5796.86 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,397 INFO [trainer.py:765] (1/8) Epoch 23, batch 900, train_loss[loss=3.261, NarTop10Accuracy=0.6633, over 6212.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6422, over 5817.48 frames. 
], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (1/8) Epoch 23, batch 1000, train_loss[loss=3.266, NarTop10Accuracy=0.6641, over 6690.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6396, over 5919.19 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,607 INFO [trainer.py:765] (1/8) Epoch 23, batch 1100, train_loss[loss=3.444, NarTop10Accuracy=0.6165, over 6990.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6362, over 5947.87 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (1/8) Epoch 23, batch 1200, train_loss[loss=3.366, NarTop10Accuracy=0.6406, over 7323.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6355, over 5943.55 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,824 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (1/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (1/8) Epoch 23, batch 1300, train_loss[loss=3.338, NarTop10Accuracy=0.6556, over 5139.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6366, over 6032.55 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (1/8) Epoch 23, batch 1400, train_loss[loss=3.396, NarTop10Accuracy=0.6466, over 6055.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6362, over 6027.93 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (1/8) Epoch 23, batch 1500, train_loss[loss=3.658, NarTop10Accuracy=0.587, over 6041.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6316, over 5978.07 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,428 INFO [trainer.py:765] (1/8) Epoch 23, batch 1600, train_loss[loss=3.428, NarTop10Accuracy=0.6374, over 7043.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6331, over 5959.04 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,202 INFO [trainer.py:765] (1/8) Epoch 23, batch 1700, train_loss[loss=3.498, NarTop10Accuracy=0.6159, over 6235.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6308, over 5941.15 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,969 INFO [trainer.py:765] (1/8) Epoch 23, batch 1800, train_loss[loss=3.34, NarTop10Accuracy=0.66, over 7312.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 5993.39 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,597 INFO [trainer.py:765] (1/8) Epoch 23, batch 1900, train_loss[loss=3.452, NarTop10Accuracy=0.6212, over 6218.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6301, over 6033.93 frames. ], batch size: 49, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (1/8) Epoch 23, batch 2000, train_loss[loss=3.791, NarTop10Accuracy=0.5622, over 6382.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6296, over 6022.15 frames. ], batch size: 52, lr: 3.69e-03 +2024-08-06 11:58:14,770 INFO [trainer.py:765] (1/8) Epoch 23, batch 2100, train_loss[loss=3.459, NarTop10Accuracy=0.6255, over 4032.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6279, over 6002.98 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,238 INFO [trainer.py:765] (1/8) Epoch 23, batch 2200, train_loss[loss=3.537, NarTop10Accuracy=0.6163, over 7334.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6311, over 6046.69 frames. ], batch size: 32, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (1/8) Epoch 23, batch 2300, train_loss[loss=3.347, NarTop10Accuracy=0.6582, over 5802.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6289, over 6068.30 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,602 INFO [trainer.py:765] (1/8) Epoch 23, batch 2400, train_loss[loss=3.323, NarTop10Accuracy=0.661, over 5097.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6273, over 5898.51 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,011 INFO [trainer.py:765] (1/8) Epoch 23, batch 2500, train_loss[loss=3.403, NarTop10Accuracy=0.643, over 5073.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.634, over 5555.16 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:18,571 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:01:22,111 INFO [trainer.py:765] (1/8) Epoch 24, batch 100, train_loss[loss=3.482, NarTop10Accuracy=0.6203, over 7231.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6433, over 2356.13 frames. ], batch size: 30, lr: 3.59e-03 +2024-08-06 12:01:51,342 INFO [trainer.py:765] (1/8) Epoch 24, batch 200, train_loss[loss=3.563, NarTop10Accuracy=0.6001, over 6991.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6454, over 3868.03 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,513 INFO [trainer.py:765] (1/8) Epoch 24, batch 300, train_loss[loss=3.291, NarTop10Accuracy=0.6687, over 7060.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 4680.90 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,847 INFO [trainer.py:765] (1/8) Epoch 24, batch 400, train_loss[loss=3.141, NarTop10Accuracy=0.6856, over 5265.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.645, over 5121.27 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,256 INFO [trainer.py:765] (1/8) Epoch 24, batch 500, train_loss[loss=3.246, NarTop10Accuracy=0.6734, over 6128.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6461, over 5412.30 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,173 INFO [trainer.py:765] (1/8) Epoch 24, batch 600, train_loss[loss=3.465, NarTop10Accuracy=0.6321, over 5754.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6457, over 5685.28 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,531 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (1/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30472MB +2024-08-06 12:04:23,310 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (1/8) Epoch 24, batch 700, train_loss[loss=3.077, NarTop10Accuracy=0.6929, over 5023.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6451, over 5748.66 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,274 INFO [trainer.py:765] (1/8) Epoch 24, batch 800, train_loss[loss=3.361, NarTop10Accuracy=0.6558, over 4990.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.644, over 5785.57 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,754 INFO [trainer.py:765] (1/8) Epoch 24, batch 900, train_loss[loss=3.746, NarTop10Accuracy=0.5671, over 6172.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6434, over 5807.06 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,812 INFO [trainer.py:765] (1/8) Epoch 24, batch 1000, train_loss[loss=3.03, NarTop10Accuracy=0.7097, over 6153.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6421, over 5911.45 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (1/8) Epoch 24, batch 1100, train_loss[loss=3.394, NarTop10Accuracy=0.6335, over 6861.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6413, over 5952.95 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,135 INFO [trainer.py:765] (1/8) Epoch 24, batch 1200, train_loss[loss=3.515, NarTop10Accuracy=0.6231, over 7158.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 5944.31 frames. ], batch size: 31, lr: 3.56e-03 +2024-08-06 12:08:20,731 INFO [trainer.py:765] (1/8) Epoch 24, batch 1300, train_loss[loss=3.297, NarTop10Accuracy=0.6555, over 4999.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6369, over 6013.76 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,066 INFO [trainer.py:765] (1/8) Epoch 24, batch 1400, train_loss[loss=3.207, NarTop10Accuracy=0.6678, over 6057.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6352, over 6025.32 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,338 INFO [trainer.py:765] (1/8) Epoch 24, batch 1500, train_loss[loss=3.493, NarTop10Accuracy=0.6269, over 6195.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6343, over 5972.17 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,525 INFO [trainer.py:765] (1/8) Epoch 24, batch 1600, train_loss[loss=3.327, NarTop10Accuracy=0.6562, over 7254.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6361, over 5961.07 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,546 INFO [trainer.py:765] (1/8) Epoch 24, batch 1700, train_loss[loss=3.375, NarTop10Accuracy=0.6341, over 6310.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6338, over 5947.18 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,273 INFO [trainer.py:765] (1/8) Epoch 24, batch 1800, train_loss[loss=3.349, NarTop10Accuracy=0.6527, over 7150.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6349, over 6006.35 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,847 INFO [trainer.py:765] (1/8) Epoch 24, batch 1900, train_loss[loss=3.457, NarTop10Accuracy=0.6271, over 6107.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6335, over 6037.15 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:11:41,667 INFO [trainer.py:765] (1/8) Epoch 24, batch 2000, train_loss[loss=3.381, NarTop10Accuracy=0.6378, over 5864.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6333, over 6028.86 frames. ], batch size: 50, lr: 3.54e-03 +2024-08-06 12:12:07,104 INFO [trainer.py:765] (1/8) Epoch 24, batch 2100, train_loss[loss=3.473, NarTop10Accuracy=0.6274, over 4829.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 6002.37 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 12:12:33,373 INFO [trainer.py:765] (1/8) Epoch 24, batch 2200, train_loss[loss=3.456, NarTop10Accuracy=0.6273, over 7710.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6343, over 6045.45 frames. 
], batch size: 31, lr: 3.53e-03 +2024-08-06 12:12:58,772 INFO [trainer.py:765] (1/8) Epoch 24, batch 2300, train_loss[loss=3.291, NarTop10Accuracy=0.6545, over 5657.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6312, over 6064.07 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,487 INFO [trainer.py:765] (1/8) Epoch 24, batch 2400, train_loss[loss=3.581, NarTop10Accuracy=0.6097, over 6204.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6299, over 5899.04 frames. ], batch size: 49, lr: 3.53e-03 +2024-08-06 12:13:47,006 INFO [trainer.py:765] (1/8) Epoch 24, batch 2500, train_loss[loss=3.146, NarTop10Accuracy=0.6945, over 4997.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 5556.84 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:08,242 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:14:50,196 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (1/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (1/8) Epoch 25, batch 100, train_loss[loss=3.284, NarTop10Accuracy=0.6642, over 7236.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6488, over 2360.09 frames. ], batch size: 30, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (1/8) Epoch 25, batch 200, train_loss[loss=3.222, NarTop10Accuracy=0.6725, over 6807.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6493, over 3857.83 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (1/8) Epoch 25, batch 300, train_loss[loss=3.381, NarTop10Accuracy=0.6468, over 7178.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6464, over 4669.30 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (1/8) Epoch 25, batch 400, train_loss[loss=3.492, NarTop10Accuracy=0.6248, over 5104.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6467, over 5113.24 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (1/8) Epoch 25, batch 500, train_loss[loss=3.352, NarTop10Accuracy=0.6499, over 6285.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6482, over 5406.04 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (1/8) Epoch 25, batch 600, train_loss[loss=3.395, NarTop10Accuracy=0.6406, over 5788.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6479, over 5671.93 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,597 INFO [trainer.py:765] (1/8) Epoch 25, batch 700, train_loss[loss=3.329, NarTop10Accuracy=0.6508, over 4990.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.647, over 5746.37 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,014 INFO [trainer.py:765] (1/8) Epoch 25, batch 800, train_loss[loss=3.273, NarTop10Accuracy=0.6754, over 5022.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6433, over 5804.69 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (1/8) Epoch 25, batch 900, train_loss[loss=3.259, NarTop10Accuracy=0.6679, over 6343.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6423, over 5826.59 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (1/8) Epoch 25, batch 1000, train_loss[loss=3.212, NarTop10Accuracy=0.6763, over 6285.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6428, over 5919.46 frames. ], batch size: 13, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (1/8) Epoch 25, batch 1100, train_loss[loss=3.33, NarTop10Accuracy=0.6485, over 6837.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.639, over 5952.75 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,638 INFO [trainer.py:765] (1/8) Epoch 25, batch 1200, train_loss[loss=3.437, NarTop10Accuracy=0.6258, over 7271.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6402, over 5965.41 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (1/8) Epoch 25, batch 1300, train_loss[loss=3.544, NarTop10Accuracy=0.6109, over 5179.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6403, over 6021.88 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (1/8) Epoch 25, batch 1400, train_loss[loss=3.632, NarTop10Accuracy=0.6039, over 6156.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6403, over 6054.48 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (1/8) Epoch 25, batch 1500, train_loss[loss=3.853, NarTop10Accuracy=0.5432, over 5949.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6385, over 5970.10 frames. ], batch size: 50, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (1/8) Epoch 25, batch 1600, train_loss[loss=3.39, NarTop10Accuracy=0.6385, over 7170.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6375, over 5947.62 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,373 INFO [trainer.py:765] (1/8) Epoch 25, batch 1700, train_loss[loss=3.459, NarTop10Accuracy=0.6177, over 6759.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6369, over 5941.97 frames. ], batch size: 14, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (1/8) Epoch 25, batch 1800, train_loss[loss=3.164, NarTop10Accuracy=0.6757, over 7002.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 6011.40 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (1/8) Epoch 25, batch 1900, train_loss[loss=3.694, NarTop10Accuracy=0.5792, over 6385.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6317, over 6052.73 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (1/8) Epoch 25, batch 2000, train_loss[loss=3.634, NarTop10Accuracy=0.588, over 5507.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6355, over 6013.86 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (1/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 12:25:59,344 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (1/8) Epoch 25, batch 2100, train_loss[loss=3.277, NarTop10Accuracy=0.6495, over 4805.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6334, over 6003.74 frames. 
], batch size: 5, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (1/8) Epoch 25, batch 2200, train_loss[loss=3.455, NarTop10Accuracy=0.6307, over 7329.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.634, over 6038.36 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (1/8) Epoch 25, batch 2300, train_loss[loss=3.6, NarTop10Accuracy=0.6079, over 5917.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6328, over 6077.23 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,151 INFO [trainer.py:765] (1/8) Epoch 25, batch 2400, train_loss[loss=3.796, NarTop10Accuracy=0.5625, over 6097.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6323, over 5886.77 frames. ], batch size: 52, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (1/8) Epoch 25, batch 2500, train_loss[loss=3.344, NarTop10Accuracy=0.6537, over 5095.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6375, over 5534.07 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:13,195 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (1/8) Epoch 26, batch 100, train_loss[loss=3.73, NarTop10Accuracy=0.5614, over 7150.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6489, over 2375.37 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (1/8) Epoch 26, batch 200, train_loss[loss=3.269, NarTop10Accuracy=0.6641, over 6556.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6472, over 3878.89 frames. ], batch size: 16, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (1/8) Epoch 26, batch 300, train_loss[loss=3.284, NarTop10Accuracy=0.6541, over 7197.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6446, over 4691.17 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,509 INFO [trainer.py:765] (1/8) Epoch 26, batch 400, train_loss[loss=3.302, NarTop10Accuracy=0.6528, over 5065.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.646, over 5144.37 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (1/8) Epoch 26, batch 500, train_loss[loss=3.456, NarTop10Accuracy=0.6294, over 6066.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6474, over 5425.37 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (1/8) Epoch 26, batch 600, train_loss[loss=3.381, NarTop10Accuracy=0.6485, over 5927.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6454, over 5685.35 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (1/8) Epoch 26, batch 700, train_loss[loss=3.41, NarTop10Accuracy=0.6433, over 4980.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6472, over 5753.14 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (1/8) Epoch 26, batch 800, train_loss[loss=3.288, NarTop10Accuracy=0.6565, over 5041.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6461, over 5798.49 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (1/8) Epoch 26, batch 900, train_loss[loss=3.587, NarTop10Accuracy=0.5918, over 6227.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6432, over 5806.82 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,903 INFO [trainer.py:765] (1/8) Epoch 26, batch 1000, train_loss[loss=3.133, NarTop10Accuracy=0.7004, over 6355.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6414, over 5916.11 frames. 
], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (1/8) Epoch 26, batch 1100, train_loss[loss=3.346, NarTop10Accuracy=0.6655, over 6933.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6411, over 5971.29 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (1/8) Epoch 26, batch 1200, train_loss[loss=3.303, NarTop10Accuracy=0.6647, over 7208.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5960.14 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (1/8) Epoch 26, batch 1300, train_loss[loss=3.661, NarTop10Accuracy=0.5827, over 5012.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6409, over 6024.23 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (1/8) Epoch 26, batch 1400, train_loss[loss=3.201, NarTop10Accuracy=0.684, over 5969.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6377, over 6032.14 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,593 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (1/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 12:37:14,077 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,027 INFO [trainer.py:765] (1/8) Epoch 26, batch 1500, train_loss[loss=3.751, NarTop10Accuracy=0.5732, over 6276.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 5961.31 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,061 INFO [trainer.py:765] (1/8) Epoch 26, batch 1600, train_loss[loss=3.481, NarTop10Accuracy=0.616, over 7049.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6388, over 5948.74 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (1/8) Epoch 26, batch 1700, train_loss[loss=3.562, NarTop10Accuracy=0.6063, over 6656.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6358, over 5944.23 frames. ], batch size: 14, lr: 3.27e-03 +2024-08-06 12:38:44,383 INFO [trainer.py:765] (1/8) Epoch 26, batch 1800, train_loss[loss=3.149, NarTop10Accuracy=0.6865, over 6986.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6365, over 5999.27 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (1/8) Epoch 26, batch 1900, train_loss[loss=3.584, NarTop10Accuracy=0.6003, over 5926.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6344, over 6040.86 frames. ], batch size: 51, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (1/8) Epoch 26, batch 2000, train_loss[loss=3.492, NarTop10Accuracy=0.6175, over 5953.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6359, over 6016.74 frames. ], batch size: 48, lr: 3.26e-03 +2024-08-06 12:40:02,147 INFO [trainer.py:765] (1/8) Epoch 26, batch 2100, train_loss[loss=3.319, NarTop10Accuracy=0.6452, over 4885.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.639, over 6013.44 frames. ], batch size: 5, lr: 3.26e-03 +2024-08-06 12:40:27,758 INFO [trainer.py:765] (1/8) Epoch 26, batch 2200, train_loss[loss=3.319, NarTop10Accuracy=0.6472, over 7058.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6383, over 6059.59 frames. 
], batch size: 30, lr: 3.26e-03 +2024-08-06 12:40:53,232 INFO [trainer.py:765] (1/8) Epoch 26, batch 2300, train_loss[loss=3.286, NarTop10Accuracy=0.6723, over 5635.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6351, over 6104.21 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,930 INFO [trainer.py:765] (1/8) Epoch 26, batch 2400, train_loss[loss=3.21, NarTop10Accuracy=0.6678, over 5265.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6355, over 5900.80 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,477 INFO [trainer.py:765] (1/8) Epoch 26, batch 2500, train_loss[loss=2.852, NarTop10Accuracy=0.7464, over 5143.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6406, over 5553.22 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,684 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:43:12,533 INFO [trainer.py:765] (1/8) Epoch 27, batch 100, train_loss[loss=3.58, NarTop10Accuracy=0.6076, over 7302.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6466, over 2375.60 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 12:43:43,576 INFO [trainer.py:765] (1/8) Epoch 27, batch 200, train_loss[loss=3.626, NarTop10Accuracy=0.5877, over 6773.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6504, over 3864.41 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (1/8) Epoch 27, batch 300, train_loss[loss=3.178, NarTop10Accuracy=0.6838, over 7099.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6526, over 4684.12 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,461 INFO [trainer.py:765] (1/8) Epoch 27, batch 400, train_loss[loss=3.069, NarTop10Accuracy=0.7087, over 5223.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6513, over 5140.72 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,670 INFO [trainer.py:765] (1/8) Epoch 27, batch 500, train_loss[loss=3.183, NarTop10Accuracy=0.677, over 6091.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6512, over 5423.78 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,261 INFO [trainer.py:765] (1/8) Epoch 27, batch 600, train_loss[loss=3.312, NarTop10Accuracy=0.654, over 5775.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 5686.82 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (1/8) Epoch 27, batch 700, train_loss[loss=3.617, NarTop10Accuracy=0.6008, over 5052.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6496, over 5752.25 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,016 INFO [trainer.py:765] (1/8) Epoch 27, batch 800, train_loss[loss=3.331, NarTop10Accuracy=0.6561, over 5046.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6455, over 5797.62 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,742 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (1/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,259 INFO [trainer.py:765] (1/8) Epoch 27, batch 900, train_loss[loss=3.29, NarTop10Accuracy=0.653, over 6360.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6449, over 5820.17 frames. 
], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,861 INFO [trainer.py:765] (1/8) Epoch 27, batch 1000, train_loss[loss=3.389, NarTop10Accuracy=0.6375, over 6084.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.645, over 5911.67 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,083 INFO [trainer.py:765] (1/8) Epoch 27, batch 1100, train_loss[loss=3.635, NarTop10Accuracy=0.585, over 6830.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6439, over 5947.76 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,895 INFO [trainer.py:765] (1/8) Epoch 27, batch 1200, train_loss[loss=3.109, NarTop10Accuracy=0.6833, over 7068.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6435, over 5936.68 frames. ], batch size: 30, lr: 3.16e-03 +2024-08-06 12:50:06,240 INFO [trainer.py:765] (1/8) Epoch 27, batch 1300, train_loss[loss=3.261, NarTop10Accuracy=0.6581, over 5103.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6424, over 6007.23 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,949 INFO [trainer.py:765] (1/8) Epoch 27, batch 1400, train_loss[loss=3.105, NarTop10Accuracy=0.6987, over 6125.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6408, over 6021.67 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,276 INFO [trainer.py:765] (1/8) Epoch 27, batch 1500, train_loss[loss=3.525, NarTop10Accuracy=0.6102, over 5586.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5968.43 frames. ], batch size: 49, lr: 3.15e-03 +2024-08-06 12:51:39,351 INFO [trainer.py:765] (1/8) Epoch 27, batch 1600, train_loss[loss=3.433, NarTop10Accuracy=0.6344, over 7292.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6388, over 5944.53 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,060 INFO [trainer.py:765] (1/8) Epoch 27, batch 1700, train_loss[loss=3.452, NarTop10Accuracy=0.6289, over 6327.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6399, over 5934.65 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,667 INFO [trainer.py:765] (1/8) Epoch 27, batch 1800, train_loss[loss=3.398, NarTop10Accuracy=0.6444, over 7142.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6414, over 5999.12 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,287 INFO [trainer.py:765] (1/8) Epoch 27, batch 1900, train_loss[loss=3.737, NarTop10Accuracy=0.5684, over 5856.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6382, over 6034.75 frames. ], batch size: 50, lr: 3.14e-03 +2024-08-06 12:53:27,997 INFO [trainer.py:765] (1/8) Epoch 27, batch 2000, train_loss[loss=3.517, NarTop10Accuracy=0.6152, over 6059.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6389, over 6016.10 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:53,537 INFO [trainer.py:765] (1/8) Epoch 27, batch 2100, train_loss[loss=3.467, NarTop10Accuracy=0.6165, over 4836.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6381, over 5999.58 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 12:54:18,996 INFO [trainer.py:765] (1/8) Epoch 27, batch 2200, train_loss[loss=3.329, NarTop10Accuracy=0.655, over 7415.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6388, over 6046.88 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 12:54:44,479 INFO [trainer.py:765] (1/8) Epoch 27, batch 2300, train_loss[loss=3.085, NarTop10Accuracy=0.7001, over 5786.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6356, over 6076.49 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,217 INFO [trainer.py:765] (1/8) Epoch 27, batch 2400, train_loss[loss=3.623, NarTop10Accuracy=0.5887, over 5912.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.632, over 5890.64 frames. ], batch size: 50, lr: 3.13e-03 +2024-08-06 12:55:32,725 INFO [trainer.py:765] (1/8) Epoch 27, batch 2500, train_loss[loss=3.398, NarTop10Accuracy=0.6418, over 5063.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6385, over 5547.22 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,037 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (1/8) Epoch 28, batch 100, train_loss[loss=3.235, NarTop10Accuracy=0.6791, over 7374.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6542, over 2359.89 frames. ], batch size: 31, lr: 3.07e-03 +2024-08-06 12:57:23,204 INFO [trainer.py:765] (1/8) Epoch 28, batch 200, train_loss[loss=3.351, NarTop10Accuracy=0.6491, over 6996.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6498, over 3863.39 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (1/8) Epoch 28, batch 300, train_loss[loss=3.433, NarTop10Accuracy=0.634, over 7063.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6483, over 4674.01 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,457 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (1/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 12:58:07,334 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (1/8) Epoch 28, batch 400, train_loss[loss=3.528, NarTop10Accuracy=0.6163, over 5234.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6467, over 5124.11 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,437 INFO [trainer.py:765] (1/8) Epoch 28, batch 500, train_loss[loss=3.196, NarTop10Accuracy=0.6929, over 6289.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.649, over 5392.89 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (1/8) Epoch 28, batch 600, train_loss[loss=3.382, NarTop10Accuracy=0.6443, over 5799.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6489, over 5679.19 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,012 INFO [trainer.py:765] (1/8) Epoch 28, batch 700, train_loss[loss=3.513, NarTop10Accuracy=0.6145, over 5080.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6471, over 5759.72 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,433 INFO [trainer.py:765] (1/8) Epoch 28, batch 800, train_loss[loss=3.292, NarTop10Accuracy=0.6483, over 5021.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6477, over 5807.41 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (1/8) Epoch 28, batch 900, train_loss[loss=3.08, NarTop10Accuracy=0.7002, over 6297.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.646, over 5816.83 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:06,494 INFO [trainer.py:765] (1/8) Epoch 28, batch 1000, train_loss[loss=3.444, NarTop10Accuracy=0.6202, over 6228.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.643, over 5909.26 frames. 
], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (1/8) Epoch 28, batch 1100, train_loss[loss=3.325, NarTop10Accuracy=0.6478, over 6914.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5943.80 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (1/8) Epoch 28, batch 1200, train_loss[loss=3.48, NarTop10Accuracy=0.6158, over 7265.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6407, over 5936.90 frames. ], batch size: 30, lr: 3.05e-03 +2024-08-06 13:03:54,153 INFO [trainer.py:765] (1/8) Epoch 28, batch 1300, train_loss[loss=3.25, NarTop10Accuracy=0.6625, over 5089.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6417, over 6026.19 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (1/8) Epoch 28, batch 1400, train_loss[loss=3.321, NarTop10Accuracy=0.6411, over 6187.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.64, over 6054.31 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,348 INFO [trainer.py:765] (1/8) Epoch 28, batch 1500, train_loss[loss=3.465, NarTop10Accuracy=0.6316, over 6316.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.642, over 5981.94 frames. ], batch size: 49, lr: 3.04e-03 +2024-08-06 13:05:30,370 INFO [trainer.py:765] (1/8) Epoch 28, batch 1600, train_loss[loss=3.645, NarTop10Accuracy=0.5873, over 6960.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 5966.66 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (1/8) Epoch 28, batch 1700, train_loss[loss=3.712, NarTop10Accuracy=0.5735, over 6211.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6418, over 5953.09 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (1/8) Epoch 28, batch 1800, train_loss[loss=3.564, NarTop10Accuracy=0.6116, over 7220.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6435, over 6023.31 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,372 INFO [trainer.py:765] (1/8) Epoch 28, batch 1900, train_loss[loss=3.497, NarTop10Accuracy=0.615, over 5875.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6406, over 6058.11 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (1/8) Epoch 28, batch 2000, train_loss[loss=3.503, NarTop10Accuracy=0.6189, over 6270.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6434, over 6032.42 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:41,546 INFO [trainer.py:765] (1/8) Epoch 28, batch 2100, train_loss[loss=3.852, NarTop10Accuracy=0.5468, over 3921.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.64, over 6011.45 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (1/8) Epoch 28, batch 2200, train_loss[loss=3.427, NarTop10Accuracy=0.6332, over 7298.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6389, over 6054.75 frames. ], batch size: 31, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (1/8) Epoch 28, batch 2300, train_loss[loss=3.39, NarTop10Accuracy=0.6474, over 5824.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6378, over 6077.81 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,134 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (1/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,389 INFO [trainer.py:765] (1/8) Epoch 28, batch 2400, train_loss[loss=3.863, NarTop10Accuracy=0.5462, over 6486.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6362, over 5896.61 frames. ], batch size: 48, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (1/8) Epoch 28, batch 2500, train_loss[loss=3.478, NarTop10Accuracy=0.623, over 4981.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6407, over 5551.94 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:51,898 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:10:48,193 INFO [trainer.py:765] (1/8) Epoch 29, batch 100, train_loss[loss=3.586, NarTop10Accuracy=0.5999, over 6997.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6568, over 2361.53 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 13:11:20,841 INFO [trainer.py:765] (1/8) Epoch 29, batch 200, train_loss[loss=3.567, NarTop10Accuracy=0.6086, over 6883.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6541, over 3865.29 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,950 INFO [trainer.py:765] (1/8) Epoch 29, batch 300, train_loss[loss=3.226, NarTop10Accuracy=0.6838, over 7061.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6527, over 4669.10 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (1/8) Epoch 29, batch 400, train_loss[loss=3.43, NarTop10Accuracy=0.6285, over 5166.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6528, over 5129.23 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,921 INFO [trainer.py:765] (1/8) Epoch 29, batch 500, train_loss[loss=3.449, NarTop10Accuracy=0.6258, over 6160.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6518, over 5397.90 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,547 INFO [trainer.py:765] (1/8) Epoch 29, batch 600, train_loss[loss=3.733, NarTop10Accuracy=0.5699, over 5792.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6493, over 5672.27 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,937 INFO [trainer.py:765] (1/8) Epoch 29, batch 700, train_loss[loss=3.65, NarTop10Accuracy=0.5889, over 5057.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 5747.72 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,676 INFO [trainer.py:765] (1/8) Epoch 29, batch 800, train_loss[loss=3.564, NarTop10Accuracy=0.6027, over 4996.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 5806.95 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,114 INFO [trainer.py:765] (1/8) Epoch 29, batch 900, train_loss[loss=3.178, NarTop10Accuracy=0.6823, over 6329.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6432, over 5816.56 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (1/8) Epoch 29, batch 1000, train_loss[loss=3.564, NarTop10Accuracy=0.6025, over 6184.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6448, over 5918.73 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,713 INFO [trainer.py:765] (1/8) Epoch 29, batch 1100, train_loss[loss=3.468, NarTop10Accuracy=0.6217, over 6479.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6423, over 5956.13 frames. 
], batch size: 16, lr: 2.94e-03 +2024-08-06 13:17:04,933 INFO [trainer.py:765] (1/8) Epoch 29, batch 1200, train_loss[loss=3.507, NarTop10Accuracy=0.6226, over 7130.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6425, over 5939.86 frames. ], batch size: 30, lr: 2.94e-03 +2024-08-06 13:17:43,957 INFO [trainer.py:765] (1/8) Epoch 29, batch 1300, train_loss[loss=3.2, NarTop10Accuracy=0.6839, over 5165.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6436, over 6014.87 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,924 INFO [trainer.py:765] (1/8) Epoch 29, batch 1400, train_loss[loss=3.702, NarTop10Accuracy=0.5711, over 6182.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.64, over 6034.77 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (1/8) Epoch 29, batch 1500, train_loss[loss=3.812, NarTop10Accuracy=0.5517, over 6208.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6412, over 5979.98 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 13:19:16,409 INFO [trainer.py:765] (1/8) Epoch 29, batch 1600, train_loss[loss=3.303, NarTop10Accuracy=0.666, over 7183.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6394, over 5954.54 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,242 INFO [trainer.py:765] (1/8) Epoch 29, batch 1700, train_loss[loss=3.269, NarTop10Accuracy=0.6688, over 6264.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6397, over 5929.50 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,091 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (1/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,108 INFO [trainer.py:765] (1/8) Epoch 29, batch 1800, train_loss[loss=3.416, NarTop10Accuracy=0.6369, over 7086.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6407, over 6005.15 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,844 INFO [trainer.py:765] (1/8) Epoch 29, batch 1900, train_loss[loss=3.466, NarTop10Accuracy=0.6209, over 5938.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6377, over 6048.21 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:21:12,479 INFO [trainer.py:765] (1/8) Epoch 29, batch 2000, train_loss[loss=3.647, NarTop10Accuracy=0.5935, over 6578.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6391, over 6003.50 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (1/8) Epoch 29, batch 2100, train_loss[loss=3.447, NarTop10Accuracy=0.6258, over 3880.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6386, over 5978.66 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,360 INFO [trainer.py:765] (1/8) Epoch 29, batch 2200, train_loss[loss=3.252, NarTop10Accuracy=0.6714, over 7259.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6392, over 6012.46 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 13:22:28,831 INFO [trainer.py:765] (1/8) Epoch 29, batch 2300, train_loss[loss=3.428, NarTop10Accuracy=0.6312, over 5837.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6367, over 6060.26 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,621 INFO [trainer.py:765] (1/8) Epoch 29, batch 2400, train_loss[loss=3.421, NarTop10Accuracy=0.6291, over 5771.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6367, over 5866.90 frames. ], batch size: 8, lr: 2.92e-03 +2024-08-06 13:23:16,979 INFO [trainer.py:765] (1/8) Epoch 29, batch 2500, train_loss[loss=3.313, NarTop10Accuracy=0.6467, over 5163.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6394, over 5528.01 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,351 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (1/8) Epoch 30, batch 100, train_loss[loss=3.272, NarTop10Accuracy=0.6639, over 7385.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6612, over 2374.95 frames. ], batch size: 33, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (1/8) Epoch 30, batch 200, train_loss[loss=3.397, NarTop10Accuracy=0.637, over 6601.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6588, over 3866.22 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (1/8) Epoch 30, batch 300, train_loss[loss=3.189, NarTop10Accuracy=0.6807, over 6996.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.655, over 4668.30 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,538 INFO [trainer.py:765] (1/8) Epoch 30, batch 400, train_loss[loss=3.371, NarTop10Accuracy=0.6531, over 5296.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 5135.72 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (1/8) Epoch 30, batch 500, train_loss[loss=3.299, NarTop10Accuracy=0.6705, over 6135.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6556, over 5409.30 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (1/8) Epoch 30, batch 600, train_loss[loss=3.253, NarTop10Accuracy=0.678, over 5808.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6534, over 5672.13 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (1/8) Epoch 30, batch 700, train_loss[loss=3.877, NarTop10Accuracy=0.5425, over 4362.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6525, over 5731.46 frames. ], batch size: 5, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (1/8) Epoch 30, batch 800, train_loss[loss=3.453, NarTop10Accuracy=0.6212, over 5004.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.648, over 5797.63 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,425 INFO [trainer.py:765] (1/8) Epoch 30, batch 900, train_loss[loss=3.323, NarTop10Accuracy=0.6497, over 6625.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6454, over 5811.98 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 13:29:45,913 INFO [trainer.py:765] (1/8) Epoch 30, batch 1000, train_loss[loss=3.3, NarTop10Accuracy=0.6584, over 6198.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6439, over 5917.32 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,171 INFO [trainer.py:765] (1/8) Epoch 30, batch 1100, train_loss[loss=3.423, NarTop10Accuracy=0.6376, over 6842.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6432, over 5954.52 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,001 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (1/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (1/8) Epoch 30, batch 1200, train_loss[loss=3.348, NarTop10Accuracy=0.6451, over 7234.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.641, over 5954.47 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (1/8) Epoch 30, batch 1300, train_loss[loss=3.342, NarTop10Accuracy=0.6526, over 5104.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6431, over 6033.30 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (1/8) Epoch 30, batch 1400, train_loss[loss=3.62, NarTop10Accuracy=0.5948, over 6132.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.641, over 6034.86 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (1/8) Epoch 30, batch 1500, train_loss[loss=3.599, NarTop10Accuracy=0.6073, over 6150.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6409, over 5981.39 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:33:20,408 INFO [trainer.py:765] (1/8) Epoch 30, batch 1600, train_loss[loss=3.472, NarTop10Accuracy=0.6266, over 6972.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6399, over 5955.81 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (1/8) Epoch 30, batch 1700, train_loss[loss=3.68, NarTop10Accuracy=0.5838, over 6674.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6395, over 5931.46 frames. ], batch size: 14, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (1/8) Epoch 30, batch 1800, train_loss[loss=3.562, NarTop10Accuracy=0.598, over 7276.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6417, over 5999.50 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,548 INFO [trainer.py:765] (1/8) Epoch 30, batch 1900, train_loss[loss=3.69, NarTop10Accuracy=0.5816, over 6319.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.639, over 6032.48 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (1/8) Epoch 30, batch 2000, train_loss[loss=3.669, NarTop10Accuracy=0.583, over 5821.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6407, over 6008.59 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (1/8) Epoch 30, batch 2100, train_loss[loss=3.508, NarTop10Accuracy=0.6013, over 3862.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.64, over 5991.95 frames. ], batch size: 4, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (1/8) Epoch 30, batch 2200, train_loss[loss=3.433, NarTop10Accuracy=0.6334, over 7211.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6411, over 6024.00 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 13:36:26,030 INFO [trainer.py:765] (1/8) Epoch 30, batch 2300, train_loss[loss=3.426, NarTop10Accuracy=0.6232, over 5732.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6404, over 6065.31 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (1/8) Epoch 30, batch 2400, train_loss[loss=3.061, NarTop10Accuracy=0.7064, over 5128.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6383, over 5872.67 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (1/8) Epoch 30, batch 2500, train_loss[loss=3.074, NarTop10Accuracy=0.7039, over 4812.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6428, over 5517.86 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:35,887 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (1/8) Epoch 31, batch 100, train_loss[loss=3.22, NarTop10Accuracy=0.6711, over 7327.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6554, over 2371.51 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (1/8) Epoch 31, batch 200, train_loss[loss=3.315, NarTop10Accuracy=0.6599, over 6818.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6546, over 3871.34 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (1/8) Epoch 31, batch 300, train_loss[loss=3.101, NarTop10Accuracy=0.6904, over 6853.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6557, over 4672.38 frames. ], batch size: 21, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (1/8) Epoch 31, batch 400, train_loss[loss=3.605, NarTop10Accuracy=0.5952, over 5120.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.652, over 5102.97 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (1/8) Epoch 31, batch 500, train_loss[loss=3.19, NarTop10Accuracy=0.6747, over 6072.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6533, over 5378.04 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,298 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (1/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,863 INFO [trainer.py:765] (1/8) Epoch 31, batch 600, train_loss[loss=3.387, NarTop10Accuracy=0.6357, over 5835.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6528, over 5647.48 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,260 INFO [trainer.py:765] (1/8) Epoch 31, batch 700, train_loss[loss=3.203, NarTop10Accuracy=0.67, over 5008.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6518, over 5726.51 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (1/8) Epoch 31, batch 800, train_loss[loss=3.205, NarTop10Accuracy=0.6767, over 5099.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6505, over 5774.02 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (1/8) Epoch 31, batch 900, train_loss[loss=3.224, NarTop10Accuracy=0.6601, over 6764.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6541, over 5789.48 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (1/8) Epoch 31, batch 1000, train_loss[loss=3.185, NarTop10Accuracy=0.6806, over 6244.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6517, over 5908.98 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (1/8) Epoch 31, batch 1100, train_loss[loss=3.407, NarTop10Accuracy=0.6432, over 6897.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6489, over 5944.52 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (1/8) Epoch 31, batch 1200, train_loss[loss=3.356, NarTop10Accuracy=0.6461, over 7317.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6456, over 5938.94 frames. ], batch size: 30, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (1/8) Epoch 31, batch 1300, train_loss[loss=3.243, NarTop10Accuracy=0.6621, over 5040.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6015.97 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,741 INFO [trainer.py:765] (1/8) Epoch 31, batch 1400, train_loss[loss=3.061, NarTop10Accuracy=0.6916, over 6102.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6453, over 6031.31 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (1/8) Epoch 31, batch 1500, train_loss[loss=3.542, NarTop10Accuracy=0.6132, over 6011.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6444, over 5986.41 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:47:04,658 INFO [trainer.py:765] (1/8) Epoch 31, batch 1600, train_loss[loss=3.214, NarTop10Accuracy=0.6782, over 6978.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6446, over 5966.84 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,424 INFO [trainer.py:765] (1/8) Epoch 31, batch 1700, train_loss[loss=3.585, NarTop10Accuracy=0.5887, over 6614.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6449, over 5944.11 frames. ], batch size: 14, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (1/8) Epoch 31, batch 1800, train_loss[loss=3.461, NarTop10Accuracy=0.6244, over 7077.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6434, over 5997.31 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,576 INFO [trainer.py:765] (1/8) Epoch 31, batch 1900, train_loss[loss=3.432, NarTop10Accuracy=0.6372, over 5681.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6419, over 6047.62 frames. ], batch size: 48, lr: 2.74e-03 +2024-08-06 13:48:50,258 INFO [trainer.py:765] (1/8) Epoch 31, batch 2000, train_loss[loss=3.685, NarTop10Accuracy=0.5871, over 5972.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 6025.79 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,764 INFO [trainer.py:765] (1/8) Epoch 31, batch 2100, train_loss[loss=3.424, NarTop10Accuracy=0.6367, over 4060.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6433, over 6006.92 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (1/8) Epoch 31, batch 2200, train_loss[loss=3.401, NarTop10Accuracy=0.6487, over 7435.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 6048.24 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 13:50:06,708 INFO [trainer.py:765] (1/8) Epoch 31, batch 2300, train_loss[loss=3.356, NarTop10Accuracy=0.6403, over 5869.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 6066.26 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,393 INFO [trainer.py:765] (1/8) Epoch 31, batch 2400, train_loss[loss=3.373, NarTop10Accuracy=0.6309, over 5024.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5872.62 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,892 INFO [trainer.py:765] (1/8) Epoch 31, batch 2500, train_loss[loss=3.339, NarTop10Accuracy=0.6253, over 4982.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6414, over 5533.52 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,995 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 13:51:19,070 INFO [trainer.py:811] (1/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:25,934 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 13:52:19,910 INFO [trainer.py:765] (1/8) Epoch 32, batch 100, train_loss[loss=3.238, NarTop10Accuracy=0.6668, over 7097.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6586, over 2373.25 frames. ], batch size: 30, lr: 2.68e-03 +2024-08-06 13:52:52,538 INFO [trainer.py:765] (1/8) Epoch 32, batch 200, train_loss[loss=3.664, NarTop10Accuracy=0.5909, over 6856.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6549, over 3873.88 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (1/8) Epoch 32, batch 300, train_loss[loss=3.221, NarTop10Accuracy=0.6766, over 7193.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6545, over 4668.02 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,887 INFO [trainer.py:765] (1/8) Epoch 32, batch 400, train_loss[loss=3.556, NarTop10Accuracy=0.6092, over 5147.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6533, over 5118.16 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,822 INFO [trainer.py:765] (1/8) Epoch 32, batch 500, train_loss[loss=2.873, NarTop10Accuracy=0.7344, over 6099.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6547, over 5405.13 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,773 INFO [trainer.py:765] (1/8) Epoch 32, batch 600, train_loss[loss=3.455, NarTop10Accuracy=0.6234, over 5742.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6563, over 5678.19 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,512 INFO [trainer.py:765] (1/8) Epoch 32, batch 700, train_loss[loss=3.211, NarTop10Accuracy=0.6779, over 5082.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6535, over 5728.50 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,172 INFO [trainer.py:765] (1/8) Epoch 32, batch 800, train_loss[loss=2.945, NarTop10Accuracy=0.7265, over 4933.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6524, over 5777.56 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,166 INFO [trainer.py:765] (1/8) Epoch 32, batch 900, train_loss[loss=3.432, NarTop10Accuracy=0.6239, over 6302.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 5811.33 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,521 INFO [trainer.py:765] (1/8) Epoch 32, batch 1000, train_loss[loss=3.583, NarTop10Accuracy=0.5969, over 6134.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5911.94 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (1/8) Epoch 32, batch 1100, train_loss[loss=3.128, NarTop10Accuracy=0.6927, over 6694.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 5944.09 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,541 INFO [trainer.py:765] (1/8) Epoch 32, batch 1200, train_loss[loss=3.129, NarTop10Accuracy=0.6963, over 7187.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6468, over 5938.51 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,260 INFO [trainer.py:765] (1/8) Epoch 32, batch 1300, train_loss[loss=3.08, NarTop10Accuracy=0.6893, over 5135.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6464, over 6006.60 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,266 INFO [trainer.py:765] (1/8) Epoch 32, batch 1400, train_loss[loss=3.407, NarTop10Accuracy=0.638, over 6034.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6471, over 6013.48 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,976 INFO [trainer.py:765] (1/8) Epoch 32, batch 1500, train_loss[loss=3.773, NarTop10Accuracy=0.5569, over 6297.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6468, over 5971.49 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,824 INFO [trainer.py:765] (1/8) Epoch 32, batch 1600, train_loss[loss=3.178, NarTop10Accuracy=0.6822, over 7250.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6464, over 5943.52 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,534 INFO [trainer.py:765] (1/8) Epoch 32, batch 1700, train_loss[loss=3.368, NarTop10Accuracy=0.6426, over 6692.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6443, over 5941.33 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 14:01:34,089 INFO [trainer.py:765] (1/8) Epoch 32, batch 1800, train_loss[loss=3.175, NarTop10Accuracy=0.6853, over 7136.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6437, over 6002.78 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (1/8) Epoch 32, batch 1900, train_loss[loss=3.472, NarTop10Accuracy=0.6249, over 6571.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.642, over 6021.80 frames. ], batch size: 52, lr: 2.65e-03 +2024-08-06 14:02:20,591 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (1/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,384 INFO [trainer.py:765] (1/8) Epoch 32, batch 2000, train_loss[loss=3.525, NarTop10Accuracy=0.6103, over 6206.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6424, over 6004.76 frames. ], batch size: 48, lr: 2.65e-03 +2024-08-06 14:03:01,698 INFO [trainer.py:765] (1/8) Epoch 32, batch 2100, train_loss[loss=3.415, NarTop10Accuracy=0.635, over 3919.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6419, over 5991.67 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 14:03:27,177 INFO [trainer.py:765] (1/8) Epoch 32, batch 2200, train_loss[loss=3.493, NarTop10Accuracy=0.6157, over 7111.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6417, over 6032.43 frames. ], batch size: 31, lr: 2.64e-03 +2024-08-06 14:03:52,586 INFO [trainer.py:765] (1/8) Epoch 32, batch 2300, train_loss[loss=3.378, NarTop10Accuracy=0.6346, over 5817.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6414, over 6053.92 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (1/8) Epoch 32, batch 2400, train_loss[loss=3.48, NarTop10Accuracy=0.6293, over 6374.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6389, over 5892.42 frames. 
], batch size: 49, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (1/8) Epoch 32, batch 2500, train_loss[loss=2.934, NarTop10Accuracy=0.722, over 5048.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6466, over 5532.52 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:02,275 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (1/8) Epoch 33, batch 100, train_loss[loss=3.526, NarTop10Accuracy=0.6119, over 7370.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6555, over 2371.68 frames. ], batch size: 30, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (1/8) Epoch 33, batch 200, train_loss[loss=3.356, NarTop10Accuracy=0.6468, over 6836.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6563, over 3868.08 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,146 INFO [trainer.py:765] (1/8) Epoch 33, batch 300, train_loss[loss=3.243, NarTop10Accuracy=0.6791, over 7174.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6568, over 4679.88 frames. ], batch size: 22, lr: 2.59e-03 +2024-08-06 14:07:48,256 INFO [trainer.py:765] (1/8) Epoch 33, batch 400, train_loss[loss=3.523, NarTop10Accuracy=0.6133, over 5224.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6562, over 5136.38 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (1/8) Epoch 33, batch 500, train_loss[loss=3.124, NarTop10Accuracy=0.6994, over 6112.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6542, over 5409.92 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (1/8) Epoch 33, batch 600, train_loss[loss=3.308, NarTop10Accuracy=0.6668, over 6020.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6517, over 5683.35 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,925 INFO [trainer.py:765] (1/8) Epoch 33, batch 700, train_loss[loss=3.204, NarTop10Accuracy=0.6699, over 5146.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6525, over 5758.46 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,596 INFO [trainer.py:765] (1/8) Epoch 33, batch 800, train_loss[loss=3.117, NarTop10Accuracy=0.6998, over 5088.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 5789.85 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,386 INFO [trainer.py:765] (1/8) Epoch 33, batch 900, train_loss[loss=3.146, NarTop10Accuracy=0.6887, over 6232.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6488, over 5826.25 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,068 INFO [trainer.py:765] (1/8) Epoch 33, batch 1000, train_loss[loss=3.161, NarTop10Accuracy=0.6775, over 6282.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5936.48 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,302 INFO [trainer.py:765] (1/8) Epoch 33, batch 1100, train_loss[loss=3.536, NarTop10Accuracy=0.6063, over 6844.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6462, over 5961.58 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (1/8) Epoch 33, batch 1200, train_loss[loss=3.535, NarTop10Accuracy=0.6102, over 6957.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6462, over 5952.83 frames. ], batch size: 30, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (1/8) Epoch 33, batch 1300, train_loss[loss=3.69, NarTop10Accuracy=0.5848, over 4977.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6485, over 6020.00 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (1/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,802 INFO [trainer.py:765] (1/8) Epoch 33, batch 1400, train_loss[loss=3.177, NarTop10Accuracy=0.6793, over 6140.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 6049.21 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,245 INFO [trainer.py:765] (1/8) Epoch 33, batch 1500, train_loss[loss=3.4, NarTop10Accuracy=0.6426, over 6434.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6472, over 5983.48 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (1/8) Epoch 33, batch 1600, train_loss[loss=3.406, NarTop10Accuracy=0.6417, over 7082.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6447, over 5953.98 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,857 INFO [trainer.py:765] (1/8) Epoch 33, batch 1700, train_loss[loss=3.589, NarTop10Accuracy=0.606, over 6121.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6451, over 5942.71 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,589 INFO [trainer.py:765] (1/8) Epoch 33, batch 1800, train_loss[loss=3.406, NarTop10Accuracy=0.6398, over 7181.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6468, over 6016.29 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,214 INFO [trainer.py:765] (1/8) Epoch 33, batch 1900, train_loss[loss=3.362, NarTop10Accuracy=0.6476, over 6341.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6459, over 6050.68 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:24,894 INFO [trainer.py:765] (1/8) Epoch 33, batch 2000, train_loss[loss=3.543, NarTop10Accuracy=0.6062, over 5424.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6468, over 6021.92 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:16:50,350 INFO [trainer.py:765] (1/8) Epoch 33, batch 2100, train_loss[loss=3.459, NarTop10Accuracy=0.6206, over 4910.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6445, over 6001.08 frames. ], batch size: 5, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (1/8) Epoch 33, batch 2200, train_loss[loss=3.42, NarTop10Accuracy=0.642, over 7212.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.646, over 6025.88 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,309 INFO [trainer.py:765] (1/8) Epoch 33, batch 2300, train_loss[loss=3.27, NarTop10Accuracy=0.6634, over 5762.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6434, over 6047.74 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (1/8) Epoch 33, batch 2400, train_loss[loss=3.87, NarTop10Accuracy=0.5433, over 5082.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6404, over 5853.97 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,706 INFO [trainer.py:765] (1/8) Epoch 33, batch 2500, train_loss[loss=3.283, NarTop10Accuracy=0.6408, over 5055.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6456, over 5527.58 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,729 INFO [trainer.py:650] (1/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (1/8) Epoch 34, batch 100, train_loss[loss=3.192, NarTop10Accuracy=0.6875, over 7301.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6568, over 2386.91 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (1/8) Epoch 34, batch 200, train_loss[loss=3.418, NarTop10Accuracy=0.6307, over 6771.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6607, over 3884.20 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,841 INFO [trainer.py:765] (1/8) Epoch 34, batch 300, train_loss[loss=3.471, NarTop10Accuracy=0.6299, over 7153.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6588, over 4670.16 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,449 INFO [trainer.py:765] (1/8) Epoch 34, batch 400, train_loss[loss=3.257, NarTop10Accuracy=0.6683, over 5154.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6588, over 5123.97 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (1/8) Epoch 34, batch 500, train_loss[loss=3.163, NarTop10Accuracy=0.6794, over 6087.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6565, over 5401.38 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,826 INFO [trainer.py:765] (1/8) Epoch 34, batch 600, train_loss[loss=3.387, NarTop10Accuracy=0.6376, over 5703.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6562, over 5682.04 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (1/8) Epoch 34, batch 700, train_loss[loss=3.085, NarTop10Accuracy=0.6988, over 5112.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6538, over 5746.31 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (1/8) Epoch 34, batch 800, train_loss[loss=3.263, NarTop10Accuracy=0.6595, over 4220.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 5799.84 frames. ], batch size: 5, lr: 2.51e-03 +2024-08-06 14:23:50,718 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (1/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (1/8) Epoch 34, batch 900, train_loss[loss=3.163, NarTop10Accuracy=0.6836, over 6297.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6522, over 5828.03 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (1/8) Epoch 34, batch 1000, train_loss[loss=3.316, NarTop10Accuracy=0.6508, over 6329.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6497, over 5921.40 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (1/8) Epoch 34, batch 1100, train_loss[loss=3.453, NarTop10Accuracy=0.6302, over 6826.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6475, over 5966.55 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,975 INFO [trainer.py:765] (1/8) Epoch 34, batch 1200, train_loss[loss=3.587, NarTop10Accuracy=0.5984, over 7113.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6487, over 5949.71 frames. 
], batch size: 31, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (1/8) Epoch 34, batch 1300, train_loss[loss=3.509, NarTop10Accuracy=0.6092, over 5178.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6487, over 6022.10 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (1/8) Epoch 34, batch 1400, train_loss[loss=3.045, NarTop10Accuracy=0.7052, over 6208.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6485, over 6042.95 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (1/8) Epoch 34, batch 1500, train_loss[loss=3.749, NarTop10Accuracy=0.5733, over 6113.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6504, over 5965.83 frames. ], batch size: 48, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (1/8) Epoch 34, batch 1600, train_loss[loss=3.291, NarTop10Accuracy=0.6685, over 7042.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6483, over 5969.40 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,384 INFO [trainer.py:765] (1/8) Epoch 34, batch 1700, train_loss[loss=3.51, NarTop10Accuracy=0.6165, over 6687.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6454, over 5950.42 frames. ], batch size: 14, lr: 2.49e-03 +2024-08-06 14:29:14,010 INFO [trainer.py:765] (1/8) Epoch 34, batch 1800, train_loss[loss=3.54, NarTop10Accuracy=0.6109, over 7134.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6469, over 6009.48 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (1/8) Epoch 34, batch 1900, train_loss[loss=3.678, NarTop10Accuracy=0.5816, over 5922.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6441, over 6055.05 frames. ], batch size: 48, lr: 2.49e-03 +2024-08-06 14:30:09,515 INFO [trainer.py:765] (1/8) Epoch 34, batch 2000, train_loss[loss=3.495, NarTop10Accuracy=0.6238, over 6397.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6458, over 6039.46 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 14:30:35,016 INFO [trainer.py:765] (1/8) Epoch 34, batch 2100, train_loss[loss=3.162, NarTop10Accuracy=0.6804, over 4032.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6471, over 6011.82 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (1/8) Epoch 34, batch 2200, train_loss[loss=3.426, NarTop10Accuracy=0.6413, over 7116.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6451, over 6043.34 frames. ], batch size: 30, lr: 2.49e-03 +2024-08-06 14:31:25,979 INFO [trainer.py:765] (1/8) Epoch 34, batch 2300, train_loss[loss=3.066, NarTop10Accuracy=0.7098, over 5725.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6444, over 6068.78 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (1/8) Epoch 34, batch 2400, train_loss[loss=3.714, NarTop10Accuracy=0.586, over 5203.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6431, over 5899.17 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (1/8) Epoch 34, batch 2500, train_loss[loss=3.327, NarTop10Accuracy=0.6556, over 5078.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6466, over 5551.93 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,332 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 14:33:26,337 INFO [trainer.py:765] (1/8) Epoch 35, batch 100, train_loss[loss=3.292, NarTop10Accuracy=0.6661, over 7611.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6605, over 2396.00 frames. 
], batch size: 31, lr: 2.44e-03 +2024-08-06 14:34:03,581 INFO [trainer.py:765] (1/8) Epoch 35, batch 200, train_loss[loss=3.513, NarTop10Accuracy=0.6113, over 6818.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6588, over 3897.24 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,185 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (1/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:34:24,110 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,665 INFO [trainer.py:765] (1/8) Epoch 35, batch 300, train_loss[loss=3.599, NarTop10Accuracy=0.5993, over 7143.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6575, over 4691.85 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,543 INFO [trainer.py:765] (1/8) Epoch 35, batch 400, train_loss[loss=3.303, NarTop10Accuracy=0.659, over 5156.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6569, over 5132.49 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,188 INFO [trainer.py:765] (1/8) Epoch 35, batch 500, train_loss[loss=3.426, NarTop10Accuracy=0.6362, over 6175.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6554, over 5407.47 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,747 INFO [trainer.py:765] (1/8) Epoch 35, batch 600, train_loss[loss=3.151, NarTop10Accuracy=0.6903, over 5777.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6565, over 5678.22 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,827 INFO [trainer.py:765] (1/8) Epoch 35, batch 700, train_loss[loss=3.26, NarTop10Accuracy=0.655, over 5089.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6562, over 5743.83 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,769 INFO [trainer.py:765] (1/8) Epoch 35, batch 800, train_loss[loss=3.325, NarTop10Accuracy=0.6573, over 4989.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6544, over 5807.21 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,304 INFO [trainer.py:765] (1/8) Epoch 35, batch 900, train_loss[loss=2.903, NarTop10Accuracy=0.736, over 6264.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6545, over 5822.59 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:38:43,709 INFO [trainer.py:765] (1/8) Epoch 35, batch 1000, train_loss[loss=3.251, NarTop10Accuracy=0.6663, over 6274.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6544, over 5920.98 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,568 INFO [trainer.py:765] (1/8) Epoch 35, batch 1100, train_loss[loss=3.51, NarTop10Accuracy=0.6223, over 7006.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6535, over 5962.70 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,838 INFO [trainer.py:765] (1/8) Epoch 35, batch 1200, train_loss[loss=3.397, NarTop10Accuracy=0.644, over 7516.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6512, over 5956.23 frames. ], batch size: 32, lr: 2.43e-03 +2024-08-06 14:40:33,953 INFO [trainer.py:765] (1/8) Epoch 35, batch 1300, train_loss[loss=3.045, NarTop10Accuracy=0.7102, over 4966.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.652, over 6025.80 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,184 INFO [trainer.py:765] (1/8) Epoch 35, batch 1400, train_loss[loss=3.374, NarTop10Accuracy=0.6383, over 5952.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6024.58 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,824 INFO [trainer.py:765] (1/8) Epoch 35, batch 1500, train_loss[loss=3.386, NarTop10Accuracy=0.6394, over 6112.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6471, over 5966.34 frames. ], batch size: 48, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (1/8) Epoch 35, batch 1600, train_loss[loss=3.679, NarTop10Accuracy=0.577, over 7387.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6485, over 5952.44 frames. ], batch size: 23, lr: 2.42e-03 +2024-08-06 14:42:28,467 INFO [trainer.py:765] (1/8) Epoch 35, batch 1700, train_loss[loss=3.405, NarTop10Accuracy=0.6354, over 6260.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6479, over 5938.85 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (1/8) Epoch 35, batch 1800, train_loss[loss=3.262, NarTop10Accuracy=0.6713, over 7061.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6464, over 5997.17 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,646 INFO [trainer.py:765] (1/8) Epoch 35, batch 1900, train_loss[loss=3.39, NarTop10Accuracy=0.6411, over 6181.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6455, over 6027.10 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 14:43:47,367 INFO [trainer.py:765] (1/8) Epoch 35, batch 2000, train_loss[loss=3.517, NarTop10Accuracy=0.6194, over 6646.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6461, over 6005.25 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:44:12,857 INFO [trainer.py:765] (1/8) Epoch 35, batch 2100, train_loss[loss=3.13, NarTop10Accuracy=0.6835, over 4901.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6466, over 5994.90 frames. ], batch size: 5, lr: 2.42e-03 +2024-08-06 14:44:38,389 INFO [trainer.py:765] (1/8) Epoch 35, batch 2200, train_loss[loss=3.622, NarTop10Accuracy=0.5904, over 7391.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 6039.16 frames. ], batch size: 30, lr: 2.42e-03 +2024-08-06 14:44:47,200 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (1/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,100 INFO [trainer.py:765] (1/8) Epoch 35, batch 2300, train_loss[loss=3.44, NarTop10Accuracy=0.6362, over 5692.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.644, over 6073.75 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,820 INFO [trainer.py:765] (1/8) Epoch 35, batch 2400, train_loss[loss=3.453, NarTop10Accuracy=0.6315, over 6255.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6423, over 5885.73 frames. ], batch size: 49, lr: 2.41e-03 +2024-08-06 14:46:02,146 INFO [trainer.py:765] (1/8) Epoch 35, batch 2500, train_loss[loss=3.557, NarTop10Accuracy=0.6246, over 5159.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6488, over 5549.96 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,256 INFO [trainer.py:650] (1/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (1/8) Epoch 36, batch 100, train_loss[loss=3.201, NarTop10Accuracy=0.6794, over 7448.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6571, over 2362.15 frames. ], batch size: 30, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (1/8) Epoch 36, batch 200, train_loss[loss=3.227, NarTop10Accuracy=0.6749, over 6836.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6615, over 3863.30 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (1/8) Epoch 36, batch 300, train_loss[loss=3.246, NarTop10Accuracy=0.6728, over 7043.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6621, over 4671.39 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (1/8) Epoch 36, batch 400, train_loss[loss=3.322, NarTop10Accuracy=0.6651, over 5075.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6613, over 5137.36 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (1/8) Epoch 36, batch 500, train_loss[loss=3.507, NarTop10Accuracy=0.6177, over 6160.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6616, over 5402.38 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (1/8) Epoch 36, batch 600, train_loss[loss=3.357, NarTop10Accuracy=0.6506, over 5591.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6587, over 5666.76 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (1/8) Epoch 36, batch 700, train_loss[loss=3.189, NarTop10Accuracy=0.6839, over 5047.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6557, over 5746.99 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,703 INFO [trainer.py:765] (1/8) Epoch 36, batch 800, train_loss[loss=3.678, NarTop10Accuracy=0.5799, over 5154.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6554, over 5799.87 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (1/8) Epoch 36, batch 900, train_loss[loss=3.132, NarTop10Accuracy=0.6934, over 6353.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6553, over 5808.67 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (1/8) Epoch 36, batch 1000, train_loss[loss=3.292, NarTop10Accuracy=0.6621, over 6758.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6534, over 5921.35 frames. ], batch size: 14, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (1/8) Epoch 36, batch 1100, train_loss[loss=3.254, NarTop10Accuracy=0.6749, over 6830.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6499, over 5959.89 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (1/8) Epoch 36, batch 1200, train_loss[loss=3.498, NarTop10Accuracy=0.6148, over 7203.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.653, over 5966.55 frames. ], batch size: 30, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (1/8) Epoch 36, batch 1300, train_loss[loss=3.307, NarTop10Accuracy=0.655, over 5005.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6507, over 6033.47 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,541 INFO [trainer.py:765] (1/8) Epoch 36, batch 1400, train_loss[loss=3.166, NarTop10Accuracy=0.6848, over 6136.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6513, over 6041.82 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (1/8) Epoch 36, batch 1500, train_loss[loss=3.545, NarTop10Accuracy=0.6025, over 6223.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6502, over 5978.01 frames. ], batch size: 51, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (1/8) Epoch 36, batch 1600, train_loss[loss=3.171, NarTop10Accuracy=0.6762, over 7215.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.65, over 5961.82 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,132 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (1/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,177 INFO [trainer.py:765] (1/8) Epoch 36, batch 1700, train_loss[loss=2.997, NarTop10Accuracy=0.7186, over 6646.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6509, over 5939.43 frames. ], batch size: 14, lr: 2.35e-03 +2024-08-06 14:56:53,759 INFO [trainer.py:765] (1/8) Epoch 36, batch 1800, train_loss[loss=3.457, NarTop10Accuracy=0.6322, over 7121.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6499, over 6008.96 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,335 INFO [trainer.py:765] (1/8) Epoch 36, batch 1900, train_loss[loss=3.499, NarTop10Accuracy=0.6165, over 5359.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6497, over 6043.38 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:57:46,057 INFO [trainer.py:765] (1/8) Epoch 36, batch 2000, train_loss[loss=3.769, NarTop10Accuracy=0.5624, over 5941.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6464, over 6009.34 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (1/8) Epoch 36, batch 2100, train_loss[loss=3.372, NarTop10Accuracy=0.6479, over 3939.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6458, over 6002.15 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 14:58:36,832 INFO [trainer.py:765] (1/8) Epoch 36, batch 2200, train_loss[loss=3.653, NarTop10Accuracy=0.5835, over 7029.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6443, over 6035.28 frames. ], batch size: 30, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (1/8) Epoch 36, batch 2300, train_loss[loss=3.369, NarTop10Accuracy=0.6387, over 5692.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6422, over 6064.35 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (1/8) Epoch 36, batch 2400, train_loss[loss=3.467, NarTop10Accuracy=0.6189, over 5125.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.642, over 5891.61 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (1/8) Epoch 36, batch 2500, train_loss[loss=3.558, NarTop10Accuracy=0.602, over 5109.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6468, over 5544.90 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,845 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (1/8) Epoch 37, batch 100, train_loss[loss=3.316, NarTop10Accuracy=0.6594, over 7573.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6622, over 2367.16 frames. 
], batch size: 31, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (1/8) Epoch 37, batch 200, train_loss[loss=3.287, NarTop10Accuracy=0.6634, over 6729.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6632, over 3863.04 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,382 INFO [trainer.py:765] (1/8) Epoch 37, batch 300, train_loss[loss=3.206, NarTop10Accuracy=0.6773, over 7121.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.664, over 4671.18 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (1/8) Epoch 37, batch 400, train_loss[loss=3.277, NarTop10Accuracy=0.6647, over 5073.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6622, over 5125.42 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (1/8) Epoch 37, batch 500, train_loss[loss=3.277, NarTop10Accuracy=0.6654, over 6158.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6606, over 5406.50 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (1/8) Epoch 37, batch 600, train_loss[loss=3.438, NarTop10Accuracy=0.6335, over 5736.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6578, over 5685.63 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,247 INFO [trainer.py:765] (1/8) Epoch 37, batch 700, train_loss[loss=3.088, NarTop10Accuracy=0.7039, over 5095.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.655, over 5760.33 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (1/8) Epoch 37, batch 800, train_loss[loss=3.503, NarTop10Accuracy=0.6274, over 5081.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6528, over 5805.30 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (1/8) Epoch 37, batch 900, train_loss[loss=3.357, NarTop10Accuracy=0.6558, over 6334.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.654, over 5812.42 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (1/8) Epoch 37, batch 1000, train_loss[loss=3.23, NarTop10Accuracy=0.6839, over 6264.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6514, over 5914.65 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:06:53,169 INFO [trainer.py:811] (1/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (1/8) Epoch 37, batch 1100, train_loss[loss=3.476, NarTop10Accuracy=0.6234, over 6994.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6518, over 5936.65 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (1/8) Epoch 37, batch 1200, train_loss[loss=3.322, NarTop10Accuracy=0.6505, over 7184.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6527, over 5945.77 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (1/8) Epoch 37, batch 1300, train_loss[loss=3.308, NarTop10Accuracy=0.6682, over 5000.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6506, over 6015.00 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (1/8) Epoch 37, batch 1400, train_loss[loss=3.189, NarTop10Accuracy=0.6817, over 6144.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.649, over 6024.28 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (1/8) Epoch 37, batch 1500, train_loss[loss=3.691, NarTop10Accuracy=0.5892, over 6139.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6496, over 5971.53 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (1/8) Epoch 37, batch 1600, train_loss[loss=3.399, NarTop10Accuracy=0.6349, over 7090.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 5952.76 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (1/8) Epoch 37, batch 1700, train_loss[loss=3.431, NarTop10Accuracy=0.6361, over 6350.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6497, over 5946.30 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (1/8) Epoch 37, batch 1800, train_loss[loss=3.325, NarTop10Accuracy=0.6566, over 7247.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6513, over 6008.13 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (1/8) Epoch 37, batch 1900, train_loss[loss=3.655, NarTop10Accuracy=0.5855, over 6546.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 6044.17 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (1/8) Epoch 37, batch 2000, train_loss[loss=3.582, NarTop10Accuracy=0.5958, over 6548.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 6024.99 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (1/8) Epoch 37, batch 2100, train_loss[loss=3.157, NarTop10Accuracy=0.6828, over 3841.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6477, over 6000.82 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,312 INFO [trainer.py:765] (1/8) Epoch 37, batch 2200, train_loss[loss=3.248, NarTop10Accuracy=0.6624, over 7143.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6491, over 6052.86 frames. ], batch size: 30, lr: 2.28e-03 +2024-08-06 15:12:49,787 INFO [trainer.py:765] (1/8) Epoch 37, batch 2300, train_loss[loss=3.138, NarTop10Accuracy=0.6913, over 5757.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6495, over 6072.49 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (1/8) Epoch 37, batch 2400, train_loss[loss=3.318, NarTop10Accuracy=0.6586, over 5064.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6476, over 5876.88 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (1/8) Epoch 37, batch 2500, train_loss[loss=3.452, NarTop10Accuracy=0.6257, over 5229.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6512, over 5516.36 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,183 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (1/8) Epoch 38, batch 100, train_loss[loss=3.403, NarTop10Accuracy=0.6296, over 7201.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6559, over 2378.73 frames. ], batch size: 30, lr: 2.25e-03 +2024-08-06 15:15:27,289 INFO [trainer.py:765] (1/8) Epoch 38, batch 200, train_loss[loss=3.138, NarTop10Accuracy=0.6907, over 6783.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6624, over 3865.44 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,281 INFO [trainer.py:765] (1/8) Epoch 38, batch 300, train_loss[loss=3.281, NarTop10Accuracy=0.654, over 6972.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6642, over 4680.42 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,595 INFO [trainer.py:765] (1/8) Epoch 38, batch 400, train_loss[loss=3.444, NarTop10Accuracy=0.6271, over 5209.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6625, over 5121.01 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,258 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (1/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,480 INFO [trainer.py:765] (1/8) Epoch 38, batch 500, train_loss[loss=3.297, NarTop10Accuracy=0.6548, over 6281.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6634, over 5409.71 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (1/8) Epoch 38, batch 600, train_loss[loss=3.067, NarTop10Accuracy=0.6974, over 5779.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6627, over 5673.66 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (1/8) Epoch 38, batch 700, train_loss[loss=3.22, NarTop10Accuracy=0.6703, over 4881.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6596, over 5748.83 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (1/8) Epoch 38, batch 800, train_loss[loss=3.378, NarTop10Accuracy=0.6554, over 5143.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6568, over 5797.67 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,540 INFO [trainer.py:765] (1/8) Epoch 38, batch 900, train_loss[loss=3.436, NarTop10Accuracy=0.6208, over 6277.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6553, over 5809.31 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (1/8) Epoch 38, batch 1000, train_loss[loss=3.444, NarTop10Accuracy=0.6326, over 6300.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6549, over 5904.44 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (1/8) Epoch 38, batch 1100, train_loss[loss=3.693, NarTop10Accuracy=0.5832, over 6838.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6518, over 5943.90 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (1/8) Epoch 38, batch 1200, train_loss[loss=3.339, NarTop10Accuracy=0.6563, over 7024.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 5936.99 frames. ], batch size: 30, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (1/8) Epoch 38, batch 1300, train_loss[loss=3.294, NarTop10Accuracy=0.6588, over 5052.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6522, over 6009.82 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (1/8) Epoch 38, batch 1400, train_loss[loss=3.059, NarTop10Accuracy=0.7113, over 6311.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.65, over 6047.09 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (1/8) Epoch 38, batch 1500, train_loss[loss=3.47, NarTop10Accuracy=0.6242, over 6071.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6487, over 5970.23 frames. ], batch size: 48, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (1/8) Epoch 38, batch 1600, train_loss[loss=3.481, NarTop10Accuracy=0.6156, over 7340.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 5945.12 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (1/8) Epoch 38, batch 1700, train_loss[loss=3.284, NarTop10Accuracy=0.6634, over 6137.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6465, over 5923.28 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,065 INFO [trainer.py:765] (1/8) Epoch 38, batch 1800, train_loss[loss=3.23, NarTop10Accuracy=0.6628, over 7134.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6462, over 6001.84 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,673 INFO [trainer.py:765] (1/8) Epoch 38, batch 1900, train_loss[loss=3.314, NarTop10Accuracy=0.653, over 6676.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 6054.68 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 15:25:20,411 INFO [trainer.py:765] (1/8) Epoch 38, batch 2000, train_loss[loss=3.618, NarTop10Accuracy=0.594, over 6205.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6026.47 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:45,857 INFO [trainer.py:765] (1/8) Epoch 38, batch 2100, train_loss[loss=3.445, NarTop10Accuracy=0.6254, over 4884.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6483, over 6015.31 frames. ], batch size: 5, lr: 2.22e-03 +2024-08-06 15:26:11,317 INFO [trainer.py:765] (1/8) Epoch 38, batch 2200, train_loss[loss=3.483, NarTop10Accuracy=0.6215, over 7361.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6501, over 6061.71 frames. ], batch size: 30, lr: 2.22e-03 +2024-08-06 15:26:36,709 INFO [trainer.py:765] (1/8) Epoch 38, batch 2300, train_loss[loss=3.19, NarTop10Accuracy=0.6797, over 5696.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6468, over 6084.42 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (1/8) Epoch 38, batch 2400, train_loss[loss=3.353, NarTop10Accuracy=0.6468, over 5258.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6446, over 5893.56 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:27:33,589 INFO [trainer.py:811] (1/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 15:27:34,076 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,515 INFO [trainer.py:765] (1/8) Epoch 38, batch 2500, train_loss[loss=3.266, NarTop10Accuracy=0.6674, over 5006.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6512, over 5547.01 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,734 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (1/8) Epoch 39, batch 100, train_loss[loss=3.247, NarTop10Accuracy=0.6654, over 7191.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6687, over 2355.94 frames. 
], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (1/8) Epoch 39, batch 200, train_loss[loss=3.534, NarTop10Accuracy=0.6142, over 6947.00 frames. ], tot_loss[loss=3.266, NarTop10Accuracy=0.6658, over 3853.37 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (1/8) Epoch 39, batch 300, train_loss[loss=3.141, NarTop10Accuracy=0.6926, over 7060.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6643, over 4677.27 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (1/8) Epoch 39, batch 400, train_loss[loss=3.128, NarTop10Accuracy=0.7016, over 5044.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6622, over 5125.73 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (1/8) Epoch 39, batch 500, train_loss[loss=3.033, NarTop10Accuracy=0.7111, over 6133.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6602, over 5411.98 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (1/8) Epoch 39, batch 600, train_loss[loss=3.508, NarTop10Accuracy=0.6326, over 5781.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6605, over 5687.01 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (1/8) Epoch 39, batch 700, train_loss[loss=3.291, NarTop10Accuracy=0.6662, over 5271.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6587, over 5751.46 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,165 INFO [trainer.py:765] (1/8) Epoch 39, batch 800, train_loss[loss=3.351, NarTop10Accuracy=0.6441, over 4896.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6573, over 5800.40 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (1/8) Epoch 39, batch 900, train_loss[loss=3.117, NarTop10Accuracy=0.6908, over 6183.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6574, over 5825.34 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (1/8) Epoch 39, batch 1000, train_loss[loss=3.08, NarTop10Accuracy=0.698, over 6666.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6566, over 5921.79 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 15:34:33,094 INFO [trainer.py:765] (1/8) Epoch 39, batch 1100, train_loss[loss=3.006, NarTop10Accuracy=0.7196, over 6944.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6555, over 5956.66 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (1/8) Epoch 39, batch 1200, train_loss[loss=3.258, NarTop10Accuracy=0.6652, over 7362.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6555, over 5964.64 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (1/8) Epoch 39, batch 1300, train_loss[loss=3.233, NarTop10Accuracy=0.6533, over 4448.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.655, over 6021.21 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (1/8) Epoch 39, batch 1400, train_loss[loss=3.165, NarTop10Accuracy=0.6801, over 6159.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.653, over 6050.37 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (1/8) Epoch 39, batch 1500, train_loss[loss=3.406, NarTop10Accuracy=0.635, over 5796.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6501, over 5967.59 frames. 
], batch size: 48, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (1/8) Epoch 39, batch 1600, train_loss[loss=3.304, NarTop10Accuracy=0.6591, over 7109.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6492, over 5951.14 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (1/8) Epoch 39, batch 1700, train_loss[loss=3.303, NarTop10Accuracy=0.6517, over 6311.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6472, over 5952.45 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 15:38:08,509 INFO [trainer.py:765] (1/8) Epoch 39, batch 1800, train_loss[loss=3.225, NarTop10Accuracy=0.6711, over 7291.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6487, over 6011.11 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (1/8) Epoch 39, batch 1900, train_loss[loss=3.37, NarTop10Accuracy=0.6473, over 5851.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 6028.05 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:38:37,990 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (1/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,263 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,227 INFO [trainer.py:765] (1/8) Epoch 39, batch 2000, train_loss[loss=3.485, NarTop10Accuracy=0.6175, over 6541.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6502, over 6002.94 frames. ], batch size: 48, lr: 2.17e-03 +2024-08-06 15:39:36,691 INFO [trainer.py:765] (1/8) Epoch 39, batch 2100, train_loss[loss=3.591, NarTop10Accuracy=0.608, over 4837.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.649, over 6008.66 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (1/8) Epoch 39, batch 2200, train_loss[loss=3.46, NarTop10Accuracy=0.6383, over 7402.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6514, over 6045.21 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (1/8) Epoch 39, batch 2300, train_loss[loss=3.397, NarTop10Accuracy=0.6445, over 5887.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.649, over 6065.47 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (1/8) Epoch 39, batch 2400, train_loss[loss=3.361, NarTop10Accuracy=0.6488, over 5752.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6489, over 5886.09 frames. ], batch size: 8, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (1/8) Epoch 39, batch 2500, train_loss[loss=3.352, NarTop10Accuracy=0.6386, over 5177.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6541, over 5518.84 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:36,914 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (1/8) Epoch 40, batch 100, train_loss[loss=3.534, NarTop10Accuracy=0.6157, over 7341.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6565, over 2366.31 frames. ], batch size: 31, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (1/8) Epoch 40, batch 200, train_loss[loss=3.477, NarTop10Accuracy=0.6175, over 6961.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6605, over 3864.00 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (1/8) Epoch 40, batch 300, train_loss[loss=3.319, NarTop10Accuracy=0.6386, over 7066.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.661, over 4660.91 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:44:18,202 INFO [trainer.py:765] (1/8) Epoch 40, batch 400, train_loss[loss=3.267, NarTop10Accuracy=0.6743, over 5133.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6626, over 5121.87 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (1/8) Epoch 40, batch 500, train_loss[loss=3.14, NarTop10Accuracy=0.7009, over 6055.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6641, over 5396.37 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (1/8) Epoch 40, batch 600, train_loss[loss=3.201, NarTop10Accuracy=0.6754, over 5710.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6614, over 5674.73 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (1/8) Epoch 40, batch 700, train_loss[loss=3.107, NarTop10Accuracy=0.6822, over 5047.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6583, over 5739.09 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (1/8) Epoch 40, batch 800, train_loss[loss=3.288, NarTop10Accuracy=0.6612, over 5088.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6572, over 5792.27 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (1/8) Epoch 40, batch 900, train_loss[loss=3.066, NarTop10Accuracy=0.6979, over 6761.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6578, over 5826.22 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (1/8) Epoch 40, batch 1000, train_loss[loss=3.46, NarTop10Accuracy=0.6279, over 6236.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6561, over 5923.09 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,710 INFO [trainer.py:765] (1/8) Epoch 40, batch 1100, train_loss[loss=3.374, NarTop10Accuracy=0.6416, over 6851.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6548, over 5925.14 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (1/8) Epoch 40, batch 1200, train_loss[loss=3.308, NarTop10Accuracy=0.6559, over 7025.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6544, over 5940.96 frames. ], batch size: 30, lr: 2.12e-03 +2024-08-06 15:49:29,782 INFO [trainer.py:765] (1/8) Epoch 40, batch 1300, train_loss[loss=3.566, NarTop10Accuracy=0.6041, over 5061.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6553, over 6007.32 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,246 INFO [trainer.py:803] (1/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (1/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (1/8) Maximum memory allocated so far is 30619MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (1/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (1/8) Epoch 40, batch 1400, train_loss[loss=3.18, NarTop10Accuracy=0.6834, over 6247.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.655, over 6024.68 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (1/8) Epoch 40, batch 1500, train_loss[loss=3.479, NarTop10Accuracy=0.6162, over 6594.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6535, over 5968.11 frames. ], batch size: 49, lr: 2.12e-03 +2024-08-06 15:51:13,821 INFO [trainer.py:765] (1/8) Epoch 40, batch 1600, train_loss[loss=3.1, NarTop10Accuracy=0.7007, over 7156.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6554, over 5951.95 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (1/8) Epoch 40, batch 1700, train_loss[loss=3.281, NarTop10Accuracy=0.6446, over 6217.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6551, over 5930.64 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (1/8) Epoch 40, batch 1800, train_loss[loss=3.636, NarTop10Accuracy=0.5927, over 6940.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.654, over 5995.04 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (1/8) Epoch 40, batch 1900, train_loss[loss=3.505, NarTop10Accuracy=0.6156, over 6048.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6513, over 6043.57 frames. ], batch size: 50, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (1/8) Epoch 40, batch 2000, train_loss[loss=3.39, NarTop10Accuracy=0.6366, over 6364.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6516, over 6019.08 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (1/8) Epoch 40, batch 2100, train_loss[loss=2.98, NarTop10Accuracy=0.7174, over 3996.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6526, over 5991.72 frames. ], batch size: 4, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (1/8) Epoch 40, batch 2200, train_loss[loss=3.338, NarTop10Accuracy=0.6509, over 7446.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6525, over 6045.66 frames. ], batch size: 31, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (1/8) Epoch 40, batch 2300, train_loss[loss=3.246, NarTop10Accuracy=0.6696, over 5677.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.652, over 6081.15 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (1/8) Epoch 40, batch 2400, train_loss[loss=3.578, NarTop10Accuracy=0.6072, over 5134.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6501, over 5890.04 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (1/8) Epoch 40, batch 2500, train_loss[loss=3.109, NarTop10Accuracy=0.712, over 5095.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.656, over 5549.56 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,694 INFO [trainer.py:650] (1/8) Reaches end of dataloader. +2024-08-06 15:55:28,697 INFO [trainer.py:1069] (1/8) Done! 
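A minimal sketch, assuming only the log-line layout visible above ("Epoch N, batch M, train_loss[loss=..., NarTop10Accuracy=..., over ... frames. ], tot_loss[...], batch size: B, lr: L"), for pulling the per-batch loss/accuracy curves out of these log files. The regex and the example file path are illustrative assumptions and are not part of the training code shipped with this repository.

import re
from pathlib import Path

# Matches the per-batch progress lines emitted by trainer.py, e.g.:
# 2024-08-06 15:50:45,930 INFO [trainer.py:765] (1/8) Epoch 40, batch 1400,
#   train_loss[loss=3.18, NarTop10Accuracy=0.6834, over 6247.00 frames. ],
#   tot_loss[loss=3.315, NarTop10Accuracy=0.655, over 6024.68 frames. ],
#   batch size: 11, lr: 2.12e-03
# (Pattern written from the log lines above; adjust if the format differs.)
PATTERN = re.compile(
    r"Epoch (?P<epoch>\d+), batch (?P<batch>\d+), "
    r"train_loss\[loss=(?P<train_loss>[\d.]+), NarTop10Accuracy=(?P<train_acc>[\d.]+).*?\], "
    r"tot_loss\[loss=(?P<tot_loss>[\d.]+), NarTop10Accuracy=(?P<tot_acc>[\d.]+).*?\], "
    r"batch size: \d+, lr: (?P<lr>[\d.e+-]+)"
)

def parse_log(path):
    """Yield one dict per 'Epoch N, batch M' progress line found in a log file."""
    for line in Path(path).read_text().splitlines():
        m = PATTERN.search(line)
        if m:
            # epoch/batch are integers; losses, accuracies and lr are floats.
            yield {
                k: (int(v) if k in ("epoch", "batch") else float(v))
                for k, v in m.groupdict().items()
            }

if __name__ == "__main__":
    # Hypothetical invocation; point it at any of the log files added by this diff.
    for rec in parse_log("libritts-r/log/log-train-2024-08-06-08-02-16-1"):
        print(rec["epoch"], rec["batch"], rec["tot_loss"], rec["tot_acc"], rec["lr"])

Validation lines ("Epoch N, validation: loss=...") deliberately do not match this pattern; a second, similar regex could be used if the validation curve is also needed.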
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-2 b/libritts/log/log-train-2024-08-06-06-41-41-2 new file mode 100644 index 0000000000000000000000000000000000000000..bb45d901df31370108890029a7cd852e5e038dc0 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-2 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,489 INFO [trainer.py:870] (2/8) Training started +2024-08-06 06:41:41,490 INFO [trainer.py:889] (2/8) Device: cuda:2 +2024-08-06 06:41:41,490 INFO [trainer.py:890] (2/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,490 INFO [trainer.py:892] (2/8) About to create model +2024-08-06 06:41:42,404 INFO [trainer.py:899] (2/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,405 INFO [checkpoint.py:112] (2/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,843 INFO [trainer.py:914] (2/8) Using DDP +2024-08-06 06:41:46,902 INFO [datamodule.py:427] (2/8) About to get train cuts +2024-08-06 06:41:46,905 INFO [datamodule.py:434] (2/8) About to get dev cuts +2024-08-06 06:41:46,906 INFO [datamodule.py:292] (2/8) Disable SpecAugment +2024-08-06 06:41:46,906 INFO [datamodule.py:294] (2/8) About to create train dataset +2024-08-06 06:41:46,907 INFO [datamodule.py:323] (2/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,535 INFO [datamodule.py:344] (2/8) About to create train dataloader +2024-08-06 06:41:47,536 INFO [datamodule.py:367] (2/8) 
About to create dev dataset +2024-08-06 06:41:47,876 INFO [datamodule.py:388] (2/8) About to create dev dataloader +2024-08-06 06:42:36,136 INFO [trainer.py:765] (2/8) Epoch 1, batch 100, train_loss[loss=93.8, NarTop10Accuracy=0.01278, over 7111.00 frames. ], tot_loss[loss=81.17, NarTop10Accuracy=0.05125, over 2368.31 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,819 INFO [trainer.py:765] (2/8) Epoch 1, batch 200, train_loss[loss=123.8, NarTop10Accuracy=0.02155, over 6988.00 frames. ], tot_loss[loss=99.49, NarTop10Accuracy=0.04404, over 3873.45 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,849 INFO [trainer.py:765] (2/8) Epoch 1, batch 300, train_loss[loss=73.31, NarTop10Accuracy=0.02368, over 7186.00 frames. ], tot_loss[loss=87.22, NarTop10Accuracy=0.04565, over 4675.63 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,251 INFO [trainer.py:765] (2/8) Epoch 1, batch 400, train_loss[loss=32.97, NarTop10Accuracy=0.05036, over 5169.00 frames. ], tot_loss[loss=67.97, NarTop10Accuracy=0.05071, over 5138.80 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,445 INFO [trainer.py:765] (2/8) Epoch 1, batch 500, train_loss[loss=16.85, NarTop10Accuracy=0.01895, over 6222.00 frames. ], tot_loss[loss=48.73, NarTop10Accuracy=0.05582, over 5419.18 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,925 INFO [trainer.py:765] (2/8) Epoch 1, batch 600, train_loss[loss=6.107, NarTop10Accuracy=0.1522, over 5816.00 frames. ], tot_loss[loss=33.35, NarTop10Accuracy=0.06247, over 5686.70 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,482 INFO [trainer.py:765] (2/8) Epoch 1, batch 700, train_loss[loss=6.943, NarTop10Accuracy=0.09554, over 5024.00 frames. ], tot_loss[loss=23.56, NarTop10Accuracy=0.07053, over 5751.07 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,663 INFO [trainer.py:765] (2/8) Epoch 1, batch 800, train_loss[loss=6.693, NarTop10Accuracy=0.09752, over 5093.00 frames. ], tot_loss[loss=17.53, NarTop10Accuracy=0.08053, over 5815.40 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,733 INFO [trainer.py:765] (2/8) Epoch 1, batch 900, train_loss[loss=6.063, NarTop10Accuracy=0.1511, over 6637.00 frames. ], tot_loss[loss=13.06, NarTop10Accuracy=0.1116, over 5810.91 frames. ], batch size: 14, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [trainer.py:765] (2/8) Epoch 1, batch 1000, train_loss[loss=5.635, NarTop10Accuracy=0.2387, over 6348.00 frames. ], tot_loss[loss=10.18, NarTop10Accuracy=0.1379, over 5911.85 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,142 INFO [trainer.py:765] (2/8) Epoch 1, batch 1100, train_loss[loss=5.48, NarTop10Accuracy=0.2127, over 6866.00 frames. ], tot_loss[loss=8.431, NarTop10Accuracy=0.1562, over 5943.12 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,710 INFO [trainer.py:765] (2/8) Epoch 1, batch 1200, train_loss[loss=6.3, NarTop10Accuracy=0.1144, over 7208.00 frames. ], tot_loss[loss=7.312, NarTop10Accuracy=0.1741, over 5947.46 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 06:48:47,236 INFO [trainer.py:765] (2/8) Epoch 1, batch 1300, train_loss[loss=5.494, NarTop10Accuracy=0.2071, over 5078.00 frames. ], tot_loss[loss=6.622, NarTop10Accuracy=0.1839, over 6022.43 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,568 INFO [trainer.py:765] (2/8) Epoch 1, batch 1400, train_loss[loss=5.467, NarTop10Accuracy=0.2033, over 6334.00 frames. ], tot_loss[loss=6.19, NarTop10Accuracy=0.1926, over 6037.55 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,507 INFO [trainer.py:765] (2/8) Epoch 1, batch 1500, train_loss[loss=5.602, NarTop10Accuracy=0.1932, over 6465.00 frames. ], tot_loss[loss=5.923, NarTop10Accuracy=0.1991, over 5970.45 frames. ], batch size: 50, lr: 2.94e-02 +2024-08-06 06:50:19,163 INFO [trainer.py:765] (2/8) Epoch 1, batch 1600, train_loss[loss=5.467, NarTop10Accuracy=0.2056, over 7201.00 frames. ], tot_loss[loss=5.75, NarTop10Accuracy=0.2047, over 5939.34 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,597 INFO [trainer.py:765] (2/8) Epoch 1, batch 1700, train_loss[loss=5.388, NarTop10Accuracy=0.2157, over 6141.00 frames. ], tot_loss[loss=5.627, NarTop10Accuracy=0.2113, over 5929.89 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 06:51:11,955 INFO [trainer.py:765] (2/8) Epoch 1, batch 1800, train_loss[loss=5.613, NarTop10Accuracy=0.1934, over 7201.00 frames. ], tot_loss[loss=5.539, NarTop10Accuracy=0.2178, over 6001.98 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,223 INFO [trainer.py:765] (2/8) Epoch 1, batch 1900, train_loss[loss=5.44, NarTop10Accuracy=0.2143, over 6104.00 frames. ], tot_loss[loss=5.476, NarTop10Accuracy=0.2243, over 6031.41 frames. ], batch size: 50, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (2/8) Epoch 1, batch 2000, train_loss[loss=5.454, NarTop10Accuracy=0.2237, over 5840.00 frames. ], tot_loss[loss=5.427, NarTop10Accuracy=0.2304, over 6006.83 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (2/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 26927MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,586 INFO [trainer.py:765] (2/8) Epoch 1, batch 2100, train_loss[loss=5.38, NarTop10Accuracy=0.2261, over 3855.00 frames. ], tot_loss[loss=5.375, NarTop10Accuracy=0.2401, over 5992.40 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 06:53:05,355 INFO [trainer.py:765] (2/8) Epoch 1, batch 2200, train_loss[loss=5.167, NarTop10Accuracy=0.2668, over 7331.00 frames. ], tot_loss[loss=5.349, NarTop10Accuracy=0.2432, over 6021.23 frames. ], batch size: 31, lr: 2.87e-02 +2024-08-06 06:53:30,701 INFO [trainer.py:765] (2/8) Epoch 1, batch 2300, train_loss[loss=5.259, NarTop10Accuracy=0.2645, over 5772.00 frames. ], tot_loss[loss=5.341, NarTop10Accuracy=0.2448, over 6064.98 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,359 INFO [trainer.py:765] (2/8) Epoch 1, batch 2400, train_loss[loss=5.207, NarTop10Accuracy=0.2651, over 5087.00 frames. ], tot_loss[loss=5.313, NarTop10Accuracy=0.2502, over 5872.06 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (2/8) Epoch 1, batch 2500, train_loss[loss=5.081, NarTop10Accuracy=0.2891, over 5167.00 frames. ], tot_loss[loss=5.251, NarTop10Accuracy=0.2616, over 5543.60 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:40,039 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 06:55:37,935 INFO [trainer.py:765] (2/8) Epoch 2, batch 100, train_loss[loss=5.181, NarTop10Accuracy=0.2738, over 6974.00 frames. ], tot_loss[loss=5.168, NarTop10Accuracy=0.2824, over 2362.85 frames. 
], batch size: 30, lr: 2.77e-02 +2024-08-06 06:56:16,404 INFO [trainer.py:765] (2/8) Epoch 2, batch 200, train_loss[loss=4.833, NarTop10Accuracy=0.3429, over 6867.00 frames. ], tot_loss[loss=5.148, NarTop10Accuracy=0.2846, over 3873.33 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,972 INFO [trainer.py:765] (2/8) Epoch 2, batch 300, train_loss[loss=5.218, NarTop10Accuracy=0.2758, over 7230.00 frames. ], tot_loss[loss=5.146, NarTop10Accuracy=0.2849, over 4671.56 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,938 INFO [trainer.py:765] (2/8) Epoch 2, batch 400, train_loss[loss=5.4, NarTop10Accuracy=0.2188, over 5221.00 frames. ], tot_loss[loss=5.137, NarTop10Accuracy=0.287, over 5124.50 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,208 INFO [trainer.py:765] (2/8) Epoch 2, batch 500, train_loss[loss=4.822, NarTop10Accuracy=0.3496, over 6128.00 frames. ], tot_loss[loss=5.103, NarTop10Accuracy=0.2932, over 5408.62 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,424 INFO [trainer.py:765] (2/8) Epoch 2, batch 600, train_loss[loss=4.869, NarTop10Accuracy=0.3384, over 5883.00 frames. ], tot_loss[loss=5.088, NarTop10Accuracy=0.2967, over 5688.72 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,281 INFO [trainer.py:765] (2/8) Epoch 2, batch 700, train_loss[loss=4.853, NarTop10Accuracy=0.3478, over 5088.00 frames. ], tot_loss[loss=5.086, NarTop10Accuracy=0.2977, over 5755.10 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,888 INFO [trainer.py:765] (2/8) Epoch 2, batch 800, train_loss[loss=4.774, NarTop10Accuracy=0.3596, over 5062.00 frames. ], tot_loss[loss=5.085, NarTop10Accuracy=0.2973, over 5786.74 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,182 INFO [trainer.py:765] (2/8) Epoch 2, batch 900, train_loss[loss=5.5, NarTop10Accuracy=0.2165, over 6313.00 frames. ], tot_loss[loss=5.05, NarTop10Accuracy=0.3038, over 5817.78 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,141 INFO [trainer.py:765] (2/8) Epoch 2, batch 1000, train_loss[loss=4.648, NarTop10Accuracy=0.382, over 6170.00 frames. ], tot_loss[loss=5.011, NarTop10Accuracy=0.3112, over 5935.57 frames. ], batch size: 13, lr: 2.66e-02 +2024-08-06 07:01:05,572 INFO [trainer.py:765] (2/8) Epoch 2, batch 1100, train_loss[loss=4.872, NarTop10Accuracy=0.3408, over 6901.00 frames. ], tot_loss[loss=5.013, NarTop10Accuracy=0.311, over 5957.47 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,284 INFO [trainer.py:765] (2/8) Epoch 2, batch 1200, train_loss[loss=4.746, NarTop10Accuracy=0.3684, over 7021.00 frames. ], tot_loss[loss=5.008, NarTop10Accuracy=0.312, over 5963.35 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,643 INFO [trainer.py:765] (2/8) Epoch 2, batch 1300, train_loss[loss=5.208, NarTop10Accuracy=0.262, over 4965.00 frames. ], tot_loss[loss=4.969, NarTop10Accuracy=0.32, over 6018.64 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,251 INFO [trainer.py:765] (2/8) Epoch 2, batch 1400, train_loss[loss=5.013, NarTop10Accuracy=0.3284, over 6051.00 frames. ], tot_loss[loss=4.955, NarTop10Accuracy=0.3224, over 6036.37 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,266 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (2/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 26927MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,472 INFO [trainer.py:765] (2/8) Epoch 2, batch 1500, train_loss[loss=5.131, NarTop10Accuracy=0.2959, over 6179.00 frames. ], tot_loss[loss=4.936, NarTop10Accuracy=0.326, over 5988.52 frames. ], batch size: 49, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (2/8) Epoch 2, batch 1600, train_loss[loss=4.692, NarTop10Accuracy=0.3736, over 7161.00 frames. ], tot_loss[loss=4.914, NarTop10Accuracy=0.3305, over 5975.09 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (2/8) Epoch 2, batch 1700, train_loss[loss=4.648, NarTop10Accuracy=0.3705, over 6365.00 frames. ], tot_loss[loss=4.906, NarTop10Accuracy=0.3324, over 5960.49 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,887 INFO [trainer.py:765] (2/8) Epoch 2, batch 1800, train_loss[loss=4.589, NarTop10Accuracy=0.3931, over 7119.00 frames. ], tot_loss[loss=4.891, NarTop10Accuracy=0.3349, over 6017.87 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,586 INFO [trainer.py:765] (2/8) Epoch 2, batch 1900, train_loss[loss=5.027, NarTop10Accuracy=0.312, over 5800.00 frames. ], tot_loss[loss=4.869, NarTop10Accuracy=0.3397, over 6047.74 frames. ], batch size: 48, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (2/8) Epoch 2, batch 2000, train_loss[loss=4.76, NarTop10Accuracy=0.3625, over 6107.00 frames. ], tot_loss[loss=4.846, NarTop10Accuracy=0.344, over 6031.76 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (2/8) Epoch 2, batch 2100, train_loss[loss=4.38, NarTop10Accuracy=0.4474, over 3884.00 frames. ], tot_loss[loss=4.845, NarTop10Accuracy=0.3443, over 6002.21 frames. ], batch size: 4, lr: 2.52e-02 +2024-08-06 07:06:30,373 INFO [trainer.py:765] (2/8) Epoch 2, batch 2200, train_loss[loss=4.637, NarTop10Accuracy=0.3881, over 7355.00 frames. ], tot_loss[loss=4.813, NarTop10Accuracy=0.3507, over 6052.97 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (2/8) Epoch 2, batch 2300, train_loss[loss=4.54, NarTop10Accuracy=0.4022, over 5794.00 frames. ], tot_loss[loss=4.812, NarTop10Accuracy=0.3516, over 6086.69 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (2/8) Epoch 2, batch 2400, train_loss[loss=4.862, NarTop10Accuracy=0.3302, over 5199.00 frames. ], tot_loss[loss=4.783, NarTop10Accuracy=0.3569, over 5881.31 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (2/8) Epoch 2, batch 2500, train_loss[loss=4.639, NarTop10Accuracy=0.3826, over 4892.00 frames. ], tot_loss[loss=4.751, NarTop10Accuracy=0.3634, over 5542.21 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:08,143 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 07:09:08,537 INFO [trainer.py:765] (2/8) Epoch 3, batch 100, train_loss[loss=4.809, NarTop10Accuracy=0.3539, over 7391.00 frames. ], tot_loss[loss=4.644, NarTop10Accuracy=0.3871, over 2376.68 frames. ], batch size: 31, lr: 2.35e-02 +2024-08-06 07:09:41,499 INFO [trainer.py:765] (2/8) Epoch 3, batch 200, train_loss[loss=4.226, NarTop10Accuracy=0.4584, over 6740.00 frames. ], tot_loss[loss=4.604, NarTop10Accuracy=0.3939, over 3857.61 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,975 INFO [trainer.py:765] (2/8) Epoch 3, batch 300, train_loss[loss=4.412, NarTop10Accuracy=0.4324, over 7163.00 frames. ], tot_loss[loss=4.594, NarTop10Accuracy=0.3956, over 4648.12 frames. ], batch size: 23, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (2/8) Epoch 3, batch 400, train_loss[loss=4.441, NarTop10Accuracy=0.4277, over 5165.00 frames. ], tot_loss[loss=4.572, NarTop10Accuracy=0.3998, over 5106.83 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (2/8) Epoch 3, batch 500, train_loss[loss=4.841, NarTop10Accuracy=0.3616, over 6179.00 frames. ], tot_loss[loss=4.572, NarTop10Accuracy=0.3996, over 5384.12 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (2/8) Epoch 3, batch 600, train_loss[loss=4.597, NarTop10Accuracy=0.3973, over 5761.00 frames. ], tot_loss[loss=4.556, NarTop10Accuracy=0.4027, over 5667.78 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (2/8) Epoch 3, batch 700, train_loss[loss=4.417, NarTop10Accuracy=0.4187, over 5167.00 frames. ], tot_loss[loss=4.545, NarTop10Accuracy=0.4044, over 5733.19 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (2/8) Epoch 3, batch 800, train_loss[loss=4.764, NarTop10Accuracy=0.3618, over 5070.00 frames. ], tot_loss[loss=4.534, NarTop10Accuracy=0.4063, over 5779.17 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (2/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 26927MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (2/8) Epoch 3, batch 900, train_loss[loss=4.171, NarTop10Accuracy=0.46, over 6256.00 frames. ], tot_loss[loss=4.515, NarTop10Accuracy=0.4104, over 5801.44 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (2/8) Epoch 3, batch 1000, train_loss[loss=4.331, NarTop10Accuracy=0.4407, over 6737.00 frames. ], tot_loss[loss=4.494, NarTop10Accuracy=0.4143, over 5906.55 frames. ], batch size: 14, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (2/8) Epoch 3, batch 1100, train_loss[loss=4.519, NarTop10Accuracy=0.4019, over 6930.00 frames. ], tot_loss[loss=4.489, NarTop10Accuracy=0.4153, over 5935.50 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,867 INFO [trainer.py:765] (2/8) Epoch 3, batch 1200, train_loss[loss=4.388, NarTop10Accuracy=0.4345, over 7452.00 frames. ], tot_loss[loss=4.476, NarTop10Accuracy=0.4174, over 5943.85 frames. ], batch size: 30, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (2/8) Epoch 3, batch 1300, train_loss[loss=4.475, NarTop10Accuracy=0.4273, over 5169.00 frames. ], tot_loss[loss=4.46, NarTop10Accuracy=0.42, over 6022.88 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (2/8) Epoch 3, batch 1400, train_loss[loss=4.326, NarTop10Accuracy=0.4402, over 6335.00 frames. ], tot_loss[loss=4.446, NarTop10Accuracy=0.4228, over 6035.29 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,664 INFO [trainer.py:765] (2/8) Epoch 3, batch 1500, train_loss[loss=4.685, NarTop10Accuracy=0.3741, over 6157.00 frames. ], tot_loss[loss=4.445, NarTop10Accuracy=0.4232, over 5972.55 frames. ], batch size: 48, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (2/8) Epoch 3, batch 1600, train_loss[loss=4.317, NarTop10Accuracy=0.455, over 7095.00 frames. ], tot_loss[loss=4.422, NarTop10Accuracy=0.4274, over 5946.51 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,505 INFO [trainer.py:765] (2/8) Epoch 3, batch 1700, train_loss[loss=4.268, NarTop10Accuracy=0.4598, over 6220.00 frames. ], tot_loss[loss=4.399, NarTop10Accuracy=0.4324, over 5941.05 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,161 INFO [trainer.py:765] (2/8) Epoch 3, batch 1800, train_loss[loss=4.21, NarTop10Accuracy=0.465, over 7113.00 frames. ], tot_loss[loss=4.383, NarTop10Accuracy=0.435, over 6015.55 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,958 INFO [trainer.py:765] (2/8) Epoch 3, batch 1900, train_loss[loss=4.617, NarTop10Accuracy=0.393, over 6588.00 frames. ], tot_loss[loss=4.375, NarTop10Accuracy=0.4366, over 6040.71 frames. ], batch size: 49, lr: 2.16e-02 +2024-08-06 07:19:27,622 INFO [trainer.py:765] (2/8) Epoch 3, batch 2000, train_loss[loss=4.506, NarTop10Accuracy=0.4041, over 6235.00 frames. ], tot_loss[loss=4.362, NarTop10Accuracy=0.4389, over 6011.66 frames. ], batch size: 49, lr: 2.15e-02 +2024-08-06 07:19:53,070 INFO [trainer.py:765] (2/8) Epoch 3, batch 2100, train_loss[loss=3.447, NarTop10Accuracy=0.5997, over 3875.00 frames. ], tot_loss[loss=4.328, NarTop10Accuracy=0.4457, over 5993.88 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,554 INFO [trainer.py:765] (2/8) Epoch 3, batch 2200, train_loss[loss=4.477, NarTop10Accuracy=0.4101, over 7070.00 frames. ], tot_loss[loss=4.309, NarTop10Accuracy=0.4499, over 6029.42 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (2/8) Epoch 3, batch 2300, train_loss[loss=3.946, NarTop10Accuracy=0.5263, over 5681.00 frames. ], tot_loss[loss=4.318, NarTop10Accuracy=0.4484, over 6052.82 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,678 INFO [trainer.py:765] (2/8) Epoch 3, batch 2400, train_loss[loss=4.041, NarTop10Accuracy=0.5033, over 5219.00 frames. ], tot_loss[loss=4.306, NarTop10Accuracy=0.4509, over 5868.98 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (2/8) Epoch 3, batch 2500, train_loss[loss=4.144, NarTop10Accuracy=0.4769, over 4271.00 frames. ], tot_loss[loss=4.262, NarTop10Accuracy=0.459, over 5539.85 frames. ], batch size: 5, lr: 2.10e-02 +2024-08-06 07:21:53,365 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (2/8) Epoch 4, batch 100, train_loss[loss=4.203, NarTop10Accuracy=0.4746, over 7687.00 frames. ], tot_loss[loss=4.2, NarTop10Accuracy=0.4729, over 2368.56 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,303 INFO [trainer.py:765] (2/8) Epoch 4, batch 200, train_loss[loss=4.319, NarTop10Accuracy=0.4483, over 7013.00 frames. ], tot_loss[loss=4.18, NarTop10Accuracy=0.4759, over 3866.82 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 26927MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,362 INFO [trainer.py:765] (2/8) Epoch 4, batch 300, train_loss[loss=3.993, NarTop10Accuracy=0.5207, over 7297.00 frames. ], tot_loss[loss=4.169, NarTop10Accuracy=0.4788, over 4664.31 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (2/8) Epoch 4, batch 400, train_loss[loss=3.72, NarTop10Accuracy=0.5614, over 5214.00 frames. ], tot_loss[loss=4.181, NarTop10Accuracy=0.4764, over 5142.10 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (2/8) Epoch 4, batch 500, train_loss[loss=4.242, NarTop10Accuracy=0.4637, over 6168.00 frames. ], tot_loss[loss=4.155, NarTop10Accuracy=0.4813, over 5430.89 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,976 INFO [trainer.py:765] (2/8) Epoch 4, batch 600, train_loss[loss=4.262, NarTop10Accuracy=0.4639, over 5810.00 frames. ], tot_loss[loss=4.147, NarTop10Accuracy=0.483, over 5702.02 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (2/8) Epoch 4, batch 700, train_loss[loss=4.396, NarTop10Accuracy=0.4366, over 5099.00 frames. ], tot_loss[loss=4.152, NarTop10Accuracy=0.4823, over 5767.46 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,433 INFO [trainer.py:765] (2/8) Epoch 4, batch 800, train_loss[loss=4.057, NarTop10Accuracy=0.5047, over 5279.00 frames. ], tot_loss[loss=4.142, NarTop10Accuracy=0.4842, over 5819.83 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,043 INFO [trainer.py:765] (2/8) Epoch 4, batch 900, train_loss[loss=4.061, NarTop10Accuracy=0.4987, over 6649.00 frames. ], tot_loss[loss=4.12, NarTop10Accuracy=0.4887, over 5819.96 frames. ], batch size: 14, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (2/8) Epoch 4, batch 1000, train_loss[loss=3.898, NarTop10Accuracy=0.5251, over 6710.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.4898, over 5924.84 frames. ], batch size: 14, lr: 1.89e-02 +2024-08-06 07:28:54,071 INFO [trainer.py:765] (2/8) Epoch 4, batch 1100, train_loss[loss=4.024, NarTop10Accuracy=0.5103, over 6935.00 frames. ], tot_loss[loss=4.114, NarTop10Accuracy=0.49, over 5969.16 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,599 INFO [trainer.py:765] (2/8) Epoch 4, batch 1200, train_loss[loss=4.395, NarTop10Accuracy=0.4413, over 7413.00 frames. ], tot_loss[loss=4.106, NarTop10Accuracy=0.4911, over 5964.15 frames. ], batch size: 31, lr: 1.87e-02 +2024-08-06 07:30:04,991 INFO [trainer.py:765] (2/8) Epoch 4, batch 1300, train_loss[loss=3.823, NarTop10Accuracy=0.5433, over 5116.00 frames. ], tot_loss[loss=4.079, NarTop10Accuracy=0.4966, over 6018.68 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,380 INFO [trainer.py:765] (2/8) Epoch 4, batch 1400, train_loss[loss=3.942, NarTop10Accuracy=0.5251, over 6219.00 frames. ], tot_loss[loss=4.077, NarTop10Accuracy=0.4969, over 6030.61 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,832 INFO [trainer.py:765] (2/8) Epoch 4, batch 1500, train_loss[loss=3.999, NarTop10Accuracy=0.5182, over 5648.00 frames. ], tot_loss[loss=4.071, NarTop10Accuracy=0.4984, over 5969.03 frames. 
], batch size: 49, lr: 1.85e-02 +2024-08-06 07:31:39,960 INFO [trainer.py:765] (2/8) Epoch 4, batch 1600, train_loss[loss=4.186, NarTop10Accuracy=0.4764, over 7188.00 frames. ], tot_loss[loss=4.069, NarTop10Accuracy=0.4995, over 5965.09 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (2/8) Epoch 4, batch 1700, train_loss[loss=4.177, NarTop10Accuracy=0.4776, over 6366.00 frames. ], tot_loss[loss=4.046, NarTop10Accuracy=0.5036, over 5934.07 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (2/8) Epoch 4, batch 1800, train_loss[loss=4.468, NarTop10Accuracy=0.4159, over 7109.00 frames. ], tot_loss[loss=4.04, NarTop10Accuracy=0.5049, over 5998.67 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,194 INFO [trainer.py:765] (2/8) Epoch 4, batch 1900, train_loss[loss=4.188, NarTop10Accuracy=0.4737, over 5626.00 frames. ], tot_loss[loss=4.062, NarTop10Accuracy=0.5008, over 6020.01 frames. ], batch size: 50, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (2/8) Epoch 4, batch 2000, train_loss[loss=4.351, NarTop10Accuracy=0.444, over 6376.00 frames. ], tot_loss[loss=4.044, NarTop10Accuracy=0.5044, over 5993.68 frames. ], batch size: 49, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (2/8) Epoch 4, batch 2100, train_loss[loss=3.818, NarTop10Accuracy=0.5528, over 4752.00 frames. ], tot_loss[loss=4.036, NarTop10Accuracy=0.5062, over 5987.65 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (2/8) Epoch 4, batch 2200, train_loss[loss=4.109, NarTop10Accuracy=0.4874, over 7153.00 frames. ], tot_loss[loss=4.041, NarTop10Accuracy=0.5051, over 6042.80 frames. ], batch size: 30, lr: 1.80e-02 +2024-08-06 07:34:31,432 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (2/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27054MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (2/8) Epoch 4, batch 2300, train_loss[loss=4.046, NarTop10Accuracy=0.5035, over 5787.00 frames. ], tot_loss[loss=4.034, NarTop10Accuracy=0.5067, over 6059.96 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,166 INFO [trainer.py:765] (2/8) Epoch 4, batch 2400, train_loss[loss=3.879, NarTop10Accuracy=0.5427, over 5074.00 frames. ], tot_loss[loss=4.023, NarTop10Accuracy=0.5091, over 5876.06 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,623 INFO [trainer.py:765] (2/8) Epoch 4, batch 2500, train_loss[loss=4.172, NarTop10Accuracy=0.4706, over 5071.00 frames. ], tot_loss[loss=4.011, NarTop10Accuracy=0.5111, over 5527.68 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,671 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (2/8) Epoch 5, batch 100, train_loss[loss=4.055, NarTop10Accuracy=0.5001, over 7254.00 frames. ], tot_loss[loss=3.964, NarTop10Accuracy=0.5216, over 2368.38 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 07:37:39,814 INFO [trainer.py:765] (2/8) Epoch 5, batch 200, train_loss[loss=4.199, NarTop10Accuracy=0.4763, over 6886.00 frames. ], tot_loss[loss=3.94, NarTop10Accuracy=0.527, over 3865.96 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (2/8) Epoch 5, batch 300, train_loss[loss=4.01, NarTop10Accuracy=0.5075, over 7247.00 frames. ], tot_loss[loss=3.917, NarTop10Accuracy=0.5316, over 4662.39 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (2/8) Epoch 5, batch 400, train_loss[loss=3.928, NarTop10Accuracy=0.5303, over 5005.00 frames. ], tot_loss[loss=3.922, NarTop10Accuracy=0.5304, over 5132.24 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (2/8) Epoch 5, batch 500, train_loss[loss=3.885, NarTop10Accuracy=0.5349, over 6200.00 frames. ], tot_loss[loss=3.928, NarTop10Accuracy=0.5291, over 5421.38 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,944 INFO [trainer.py:765] (2/8) Epoch 5, batch 600, train_loss[loss=4.068, NarTop10Accuracy=0.5059, over 5703.00 frames. ], tot_loss[loss=3.919, NarTop10Accuracy=0.5308, over 5687.95 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (2/8) Epoch 5, batch 700, train_loss[loss=3.717, NarTop10Accuracy=0.5761, over 5148.00 frames. ], tot_loss[loss=3.919, NarTop10Accuracy=0.5306, over 5756.47 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,366 INFO [trainer.py:765] (2/8) Epoch 5, batch 800, train_loss[loss=4.309, NarTop10Accuracy=0.4526, over 5212.00 frames. ], tot_loss[loss=3.924, NarTop10Accuracy=0.5294, over 5808.52 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (2/8) Epoch 5, batch 900, train_loss[loss=4.181, NarTop10Accuracy=0.4763, over 6394.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5332, over 5811.51 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 07:42:13,846 INFO [trainer.py:765] (2/8) Epoch 5, batch 1000, train_loss[loss=3.957, NarTop10Accuracy=0.5251, over 6288.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5358, over 5909.89 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (2/8) Epoch 5, batch 1100, train_loss[loss=3.717, NarTop10Accuracy=0.5597, over 6833.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5349, over 5950.02 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (2/8) Epoch 5, batch 1200, train_loss[loss=4.112, NarTop10Accuracy=0.498, over 7411.00 frames. ], tot_loss[loss=3.903, NarTop10Accuracy=0.5333, over 5957.63 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,556 INFO [trainer.py:765] (2/8) Epoch 5, batch 1300, train_loss[loss=4.087, NarTop10Accuracy=0.513, over 5034.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5344, over 6020.23 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (2/8) Epoch 5, batch 1400, train_loss[loss=3.83, NarTop10Accuracy=0.5491, over 6234.00 frames. ], tot_loss[loss=3.901, NarTop10Accuracy=0.5343, over 6041.53 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (2/8) Epoch 5, batch 1500, train_loss[loss=3.993, NarTop10Accuracy=0.5193, over 5775.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.5329, over 5981.31 frames. ], batch size: 48, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (2/8) Epoch 5, batch 1600, train_loss[loss=4.063, NarTop10Accuracy=0.498, over 7126.00 frames. ], tot_loss[loss=3.914, NarTop10Accuracy=0.5312, over 5948.55 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,057 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (2/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27054MB +2024-08-06 07:46:02,123 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (2/8) Epoch 5, batch 1700, train_loss[loss=3.719, NarTop10Accuracy=0.5686, over 6224.00 frames. ], tot_loss[loss=3.91, NarTop10Accuracy=0.5318, over 5945.60 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (2/8) Epoch 5, batch 1800, train_loss[loss=3.853, NarTop10Accuracy=0.5413, over 7024.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.534, over 6001.47 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (2/8) Epoch 5, batch 1900, train_loss[loss=3.971, NarTop10Accuracy=0.5219, over 6254.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5334, over 6049.08 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (2/8) Epoch 5, batch 2000, train_loss[loss=3.944, NarTop10Accuracy=0.5318, over 5940.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.5337, over 6032.92 frames. ], batch size: 48, lr: 1.55e-02 +2024-08-06 07:47:52,619 INFO [trainer.py:765] (2/8) Epoch 5, batch 2100, train_loss[loss=3.876, NarTop10Accuracy=0.549, over 4033.00 frames. ], tot_loss[loss=3.902, NarTop10Accuracy=0.5342, over 6008.06 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (2/8) Epoch 5, batch 2200, train_loss[loss=3.902, NarTop10Accuracy=0.5383, over 7322.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5378, over 6049.61 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (2/8) Epoch 5, batch 2300, train_loss[loss=3.95, NarTop10Accuracy=0.5304, over 5779.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5351, over 6071.41 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (2/8) Epoch 5, batch 2400, train_loss[loss=4.026, NarTop10Accuracy=0.5078, over 5900.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5353, over 5891.76 frames. ], batch size: 50, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (2/8) Epoch 5, batch 2500, train_loss[loss=3.679, NarTop10Accuracy=0.5825, over 4964.00 frames. ], tot_loss[loss=3.859, NarTop10Accuracy=0.5427, over 5525.28 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,558 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (2/8) Epoch 6, batch 100, train_loss[loss=3.647, NarTop10Accuracy=0.5911, over 7183.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5591, over 2383.50 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,789 INFO [trainer.py:765] (2/8) Epoch 6, batch 200, train_loss[loss=3.676, NarTop10Accuracy=0.5778, over 6917.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5569, over 3874.56 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (2/8) Epoch 6, batch 300, train_loss[loss=3.746, NarTop10Accuracy=0.575, over 7181.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5575, over 4679.65 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (2/8) Epoch 6, batch 400, train_loss[loss=3.87, NarTop10Accuracy=0.547, over 5176.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5571, over 5122.13 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (2/8) Epoch 6, batch 500, train_loss[loss=3.944, NarTop10Accuracy=0.5288, over 6057.00 frames. ], tot_loss[loss=3.776, NarTop10Accuracy=0.5611, over 5379.06 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (2/8) Epoch 6, batch 600, train_loss[loss=3.783, NarTop10Accuracy=0.5588, over 5794.00 frames. ], tot_loss[loss=3.782, NarTop10Accuracy=0.5597, over 5643.20 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,439 INFO [trainer.py:765] (2/8) Epoch 6, batch 700, train_loss[loss=3.962, NarTop10Accuracy=0.5268, over 5133.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5581, over 5711.01 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (2/8) Epoch 6, batch 800, train_loss[loss=3.586, NarTop10Accuracy=0.6106, over 4340.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5566, over 5762.85 frames. ], batch size: 5, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (2/8) Epoch 6, batch 900, train_loss[loss=3.521, NarTop10Accuracy=0.6016, over 6223.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.5583, over 5792.05 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (2/8) Epoch 6, batch 1000, train_loss[loss=3.723, NarTop10Accuracy=0.5802, over 6627.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5547, over 5907.31 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (2/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27161MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (2/8) Epoch 6, batch 1100, train_loss[loss=3.516, NarTop10Accuracy=0.6091, over 6835.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5556, over 5949.90 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (2/8) Epoch 6, batch 1200, train_loss[loss=3.996, NarTop10Accuracy=0.5283, over 7674.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.5566, over 5960.25 frames. ], batch size: 32, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (2/8) Epoch 6, batch 1300, train_loss[loss=3.465, NarTop10Accuracy=0.6168, over 5024.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5558, over 6028.18 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (2/8) Epoch 6, batch 1400, train_loss[loss=4.139, NarTop10Accuracy=0.4877, over 6148.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.5553, over 6048.95 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,998 INFO [trainer.py:765] (2/8) Epoch 6, batch 1500, train_loss[loss=3.941, NarTop10Accuracy=0.5305, over 6132.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5546, over 5985.77 frames. 
], batch size: 49, lr: 1.36e-02 +2024-08-06 07:59:28,934 INFO [trainer.py:765] (2/8) Epoch 6, batch 1600, train_loss[loss=3.536, NarTop10Accuracy=0.6173, over 7106.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5582, over 5968.54 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (2/8) Epoch 6, batch 1700, train_loss[loss=3.774, NarTop10Accuracy=0.5502, over 6191.00 frames. ], tot_loss[loss=3.782, NarTop10Accuracy=0.5586, over 5947.48 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (2/8) Epoch 6, batch 1800, train_loss[loss=3.803, NarTop10Accuracy=0.5566, over 7068.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5568, over 6014.23 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,795 INFO [trainer.py:765] (2/8) Epoch 6, batch 1900, train_loss[loss=4.002, NarTop10Accuracy=0.519, over 5290.00 frames. ], tot_loss[loss=3.825, NarTop10Accuracy=0.5507, over 6043.47 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:14,462 INFO [trainer.py:765] (2/8) Epoch 6, batch 2000, train_loss[loss=3.924, NarTop10Accuracy=0.5292, over 5759.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5559, over 6016.50 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:43,134 INFO [trainer.py:765] (2/8) Epoch 6, batch 2100, train_loss[loss=3.593, NarTop10Accuracy=0.5957, over 4726.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5552, over 6005.09 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (2/8) Epoch 6, batch 2200, train_loss[loss=3.639, NarTop10Accuracy=0.5917, over 7226.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5551, over 6049.08 frames. ], batch size: 30, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (2/8) Epoch 6, batch 2300, train_loss[loss=3.683, NarTop10Accuracy=0.5681, over 5820.00 frames. ], tot_loss[loss=3.809, NarTop10Accuracy=0.5537, over 6085.86 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,617 INFO [trainer.py:765] (2/8) Epoch 6, batch 2400, train_loss[loss=3.619, NarTop10Accuracy=0.5902, over 5166.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.555, over 5894.95 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,940 INFO [trainer.py:765] (2/8) Epoch 6, batch 2500, train_loss[loss=4.239, NarTop10Accuracy=0.4639, over 4939.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5581, over 5540.34 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:43,030 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:04:42,817 INFO [trainer.py:765] (2/8) Epoch 7, batch 100, train_loss[loss=3.966, NarTop10Accuracy=0.5237, over 7378.00 frames. ], tot_loss[loss=3.683, NarTop10Accuracy=0.5812, over 2366.79 frames. ], batch size: 31, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (2/8) Epoch 7, batch 200, train_loss[loss=3.874, NarTop10Accuracy=0.5424, over 6793.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.576, over 3864.16 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (2/8) Epoch 7, batch 300, train_loss[loss=3.478, NarTop10Accuracy=0.6226, over 7234.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5723, over 4669.61 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (2/8) Epoch 7, batch 400, train_loss[loss=3.744, NarTop10Accuracy=0.5628, over 5187.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5739, over 5147.57 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,316 INFO [trainer.py:765] (2/8) Epoch 7, batch 500, train_loss[loss=3.735, NarTop10Accuracy=0.5648, over 6112.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5743, over 5427.39 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27197MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (2/8) Epoch 7, batch 600, train_loss[loss=3.498, NarTop10Accuracy=0.6221, over 5791.00 frames. ], tot_loss[loss=3.71, NarTop10Accuracy=0.5739, over 5687.50 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (2/8) Epoch 7, batch 700, train_loss[loss=3.623, NarTop10Accuracy=0.6092, over 5188.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5723, over 5762.37 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (2/8) Epoch 7, batch 800, train_loss[loss=3.348, NarTop10Accuracy=0.6429, over 5041.00 frames. ], tot_loss[loss=3.702, NarTop10Accuracy=0.5752, over 5794.84 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (2/8) Epoch 7, batch 900, train_loss[loss=3.723, NarTop10Accuracy=0.5874, over 6279.00 frames. ], tot_loss[loss=3.714, NarTop10Accuracy=0.5732, over 5819.81 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,191 INFO [trainer.py:765] (2/8) Epoch 7, batch 1000, train_loss[loss=3.756, NarTop10Accuracy=0.5639, over 6214.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.572, over 5915.96 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (2/8) Epoch 7, batch 1100, train_loss[loss=3.773, NarTop10Accuracy=0.5648, over 6810.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5713, over 5952.53 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,492 INFO [trainer.py:765] (2/8) Epoch 7, batch 1200, train_loss[loss=4.01, NarTop10Accuracy=0.5103, over 7357.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5711, over 5956.50 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (2/8) Epoch 7, batch 1300, train_loss[loss=3.606, NarTop10Accuracy=0.588, over 5115.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5714, over 6011.06 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (2/8) Epoch 7, batch 1400, train_loss[loss=3.777, NarTop10Accuracy=0.5529, over 6179.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5709, over 6028.27 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (2/8) Epoch 7, batch 1500, train_loss[loss=3.763, NarTop10Accuracy=0.5709, over 6162.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5732, over 5970.99 frames. ], batch size: 48, lr: 1.19e-02 +2024-08-06 08:13:13,238 INFO [trainer.py:765] (2/8) Epoch 7, batch 1600, train_loss[loss=3.652, NarTop10Accuracy=0.5888, over 7089.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5727, over 5956.76 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (2/8) Epoch 7, batch 1700, train_loss[loss=3.791, NarTop10Accuracy=0.5505, over 6722.00 frames. ], tot_loss[loss=3.732, NarTop10Accuracy=0.5694, over 5960.79 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 08:14:06,583 INFO [trainer.py:765] (2/8) Epoch 7, batch 1800, train_loss[loss=3.801, NarTop10Accuracy=0.5619, over 6974.00 frames. ], tot_loss[loss=3.738, NarTop10Accuracy=0.5681, over 6016.06 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (2/8) Epoch 7, batch 1900, train_loss[loss=4.098, NarTop10Accuracy=0.4973, over 5990.00 frames. ], tot_loss[loss=3.745, NarTop10Accuracy=0.5671, over 6046.74 frames. ], batch size: 48, lr: 1.17e-02 +2024-08-06 08:14:58,994 INFO [trainer.py:765] (2/8) Epoch 7, batch 2000, train_loss[loss=3.608, NarTop10Accuracy=0.5946, over 6062.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5689, over 6024.02 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (2/8) Epoch 7, batch 2100, train_loss[loss=4.006, NarTop10Accuracy=0.5123, over 3869.00 frames. ], tot_loss[loss=3.731, NarTop10Accuracy=0.5695, over 6004.40 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 08:15:49,961 INFO [trainer.py:765] (2/8) Epoch 7, batch 2200, train_loss[loss=3.945, NarTop10Accuracy=0.5265, over 7049.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5689, over 6042.49 frames. ], batch size: 30, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (2/8) Epoch 7, batch 2300, train_loss[loss=3.661, NarTop10Accuracy=0.5716, over 5713.00 frames. ], tot_loss[loss=3.745, NarTop10Accuracy=0.5666, over 6069.97 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (2/8) Epoch 7, batch 2400, train_loss[loss=3.606, NarTop10Accuracy=0.5943, over 5243.00 frames. ], tot_loss[loss=3.75, NarTop10Accuracy=0.5659, over 5892.45 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (2/8) Epoch 7, batch 2500, train_loss[loss=3.249, NarTop10Accuracy=0.6481, over 5095.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5702, over 5538.96 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,844 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (2/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27197MB +2024-08-06 08:17:17,902 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,208 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:18:36,192 INFO [trainer.py:765] (2/8) Epoch 8, batch 100, train_loss[loss=3.743, NarTop10Accuracy=0.5765, over 7551.00 frames. ], tot_loss[loss=3.651, NarTop10Accuracy=0.5868, over 2384.67 frames. ], batch size: 32, lr: 1.09e-02 +2024-08-06 08:19:15,018 INFO [trainer.py:765] (2/8) Epoch 8, batch 200, train_loss[loss=3.621, NarTop10Accuracy=0.5964, over 6960.00 frames. ], tot_loss[loss=3.651, NarTop10Accuracy=0.5864, over 3866.96 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,559 INFO [trainer.py:765] (2/8) Epoch 8, batch 300, train_loss[loss=3.707, NarTop10Accuracy=0.5765, over 7074.00 frames. ], tot_loss[loss=3.656, NarTop10Accuracy=0.586, over 4684.12 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,267 INFO [trainer.py:765] (2/8) Epoch 8, batch 400, train_loss[loss=3.351, NarTop10Accuracy=0.647, over 5173.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5841, over 5121.92 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,420 INFO [trainer.py:765] (2/8) Epoch 8, batch 500, train_loss[loss=3.595, NarTop10Accuracy=0.6063, over 6021.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5855, over 5402.92 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,736 INFO [trainer.py:765] (2/8) Epoch 8, batch 600, train_loss[loss=3.711, NarTop10Accuracy=0.5746, over 5843.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5828, over 5661.17 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,605 INFO [trainer.py:765] (2/8) Epoch 8, batch 700, train_loss[loss=3.689, NarTop10Accuracy=0.575, over 5051.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5821, over 5727.86 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,340 INFO [trainer.py:765] (2/8) Epoch 8, batch 800, train_loss[loss=3.448, NarTop10Accuracy=0.6306, over 4814.00 frames. ], tot_loss[loss=3.677, NarTop10Accuracy=0.5812, over 5783.05 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,891 INFO [trainer.py:765] (2/8) Epoch 8, batch 900, train_loss[loss=3.45, NarTop10Accuracy=0.6201, over 6307.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5846, over 5814.37 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:42,943 INFO [trainer.py:765] (2/8) Epoch 8, batch 1000, train_loss[loss=3.464, NarTop10Accuracy=0.6217, over 6690.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5837, over 5916.72 frames. ], batch size: 14, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (2/8) Epoch 8, batch 1100, train_loss[loss=3.775, NarTop10Accuracy=0.5554, over 6931.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5828, over 5940.99 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,338 INFO [trainer.py:765] (2/8) Epoch 8, batch 1200, train_loss[loss=3.503, NarTop10Accuracy=0.6202, over 7421.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5822, over 5938.76 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 08:25:26,603 INFO [trainer.py:765] (2/8) Epoch 8, batch 1300, train_loss[loss=3.613, NarTop10Accuracy=0.6006, over 5002.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.584, over 6012.26 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,603 INFO [trainer.py:765] (2/8) Epoch 8, batch 1400, train_loss[loss=3.637, NarTop10Accuracy=0.5849, over 6224.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.5808, over 6037.30 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,985 INFO [trainer.py:765] (2/8) Epoch 8, batch 1500, train_loss[loss=3.728, NarTop10Accuracy=0.5733, over 6000.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5822, over 5972.47 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,931 INFO [trainer.py:765] (2/8) Epoch 8, batch 1600, train_loss[loss=3.641, NarTop10Accuracy=0.5801, over 7324.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5839, over 5955.22 frames. ], batch size: 23, lr: 1.05e-02 +2024-08-06 08:27:23,761 INFO [trainer.py:765] (2/8) Epoch 8, batch 1700, train_loss[loss=3.548, NarTop10Accuracy=0.6138, over 6772.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5819, over 5940.95 frames. 
], batch size: 14, lr: 1.05e-02 +2024-08-06 08:27:50,460 INFO [trainer.py:765] (2/8) Epoch 8, batch 1800, train_loss[loss=3.697, NarTop10Accuracy=0.5727, over 7359.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5827, over 5998.24 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,178 INFO [trainer.py:765] (2/8) Epoch 8, batch 1900, train_loss[loss=4.046, NarTop10Accuracy=0.5081, over 6389.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5832, over 6046.34 frames. ], batch size: 48, lr: 1.04e-02 +2024-08-06 08:28:25,163 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (2/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27197MB +2024-08-06 08:28:35,795 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,983 INFO [trainer.py:765] (2/8) Epoch 8, batch 2000, train_loss[loss=3.89, NarTop10Accuracy=0.534, over 6608.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5826, over 6017.20 frames. ], batch size: 48, lr: 1.04e-02 +2024-08-06 08:29:18,485 INFO [trainer.py:765] (2/8) Epoch 8, batch 2100, train_loss[loss=3.597, NarTop10Accuracy=0.5945, over 4017.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5813, over 5989.58 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 08:29:43,790 INFO [trainer.py:765] (2/8) Epoch 8, batch 2200, train_loss[loss=3.869, NarTop10Accuracy=0.5407, over 7283.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5796, over 6026.99 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (2/8) Epoch 8, batch 2300, train_loss[loss=3.686, NarTop10Accuracy=0.5902, over 5846.00 frames. ], tot_loss[loss=3.688, NarTop10Accuracy=0.5781, over 6067.32 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,791 INFO [trainer.py:765] (2/8) Epoch 8, batch 2400, train_loss[loss=3.44, NarTop10Accuracy=0.6259, over 5051.00 frames. ], tot_loss[loss=3.694, NarTop10Accuracy=0.5772, over 5892.94 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (2/8) Epoch 8, batch 2500, train_loss[loss=3.41, NarTop10Accuracy=0.643, over 5059.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5807, over 5542.44 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,228 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (2/8) Epoch 9, batch 100, train_loss[loss=3.924, NarTop10Accuracy=0.5318, over 7186.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5975, over 2358.76 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (2/8) Epoch 9, batch 200, train_loss[loss=3.399, NarTop10Accuracy=0.6231, over 6777.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6008, over 3863.23 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (2/8) Epoch 9, batch 300, train_loss[loss=3.72, NarTop10Accuracy=0.5705, over 7121.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6, over 4675.89 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,964 INFO [trainer.py:765] (2/8) Epoch 9, batch 400, train_loss[loss=3.381, NarTop10Accuracy=0.6461, over 5209.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5998, over 5114.22 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (2/8) Epoch 9, batch 500, train_loss[loss=3.634, NarTop10Accuracy=0.5864, over 5955.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6023, over 5402.62 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (2/8) Epoch 9, batch 600, train_loss[loss=3.362, NarTop10Accuracy=0.6498, over 5743.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6016, over 5662.94 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (2/8) Epoch 9, batch 700, train_loss[loss=3.789, NarTop10Accuracy=0.5586, over 5034.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.599, over 5729.14 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (2/8) Epoch 9, batch 800, train_loss[loss=3.398, NarTop10Accuracy=0.6288, over 5162.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5943, over 5790.85 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,455 INFO [trainer.py:765] (2/8) Epoch 9, batch 900, train_loss[loss=3.563, NarTop10Accuracy=0.6068, over 6712.00 frames. ], tot_loss[loss=3.611, NarTop10Accuracy=0.5941, over 5810.70 frames. ], batch size: 14, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (2/8) Epoch 9, batch 1000, train_loss[loss=3.446, NarTop10Accuracy=0.6418, over 6290.00 frames. ], tot_loss[loss=3.619, NarTop10Accuracy=0.5927, over 5927.64 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,421 INFO [trainer.py:765] (2/8) Epoch 9, batch 1100, train_loss[loss=3.766, NarTop10Accuracy=0.5697, over 6733.00 frames. ], tot_loss[loss=3.635, NarTop10Accuracy=0.5889, over 5962.27 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (2/8) Epoch 9, batch 1200, train_loss[loss=3.687, NarTop10Accuracy=0.575, over 7072.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5879, over 5947.63 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,841 INFO [trainer.py:765] (2/8) Epoch 9, batch 1300, train_loss[loss=3.719, NarTop10Accuracy=0.5782, over 4994.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5901, over 6017.63 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,117 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:39:38,197 INFO [trainer.py:811] (2/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27197MB +2024-08-06 08:39:38,759 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (2/8) Epoch 9, batch 1400, train_loss[loss=3.605, NarTop10Accuracy=0.5939, over 6219.00 frames. ], tot_loss[loss=3.623, NarTop10Accuracy=0.5915, over 6022.18 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (2/8) Epoch 9, batch 1500, train_loss[loss=3.823, NarTop10Accuracy=0.5556, over 6205.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5902, over 5953.53 frames. ], batch size: 49, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (2/8) Epoch 9, batch 1600, train_loss[loss=3.886, NarTop10Accuracy=0.5463, over 7053.00 frames. ], tot_loss[loss=3.636, NarTop10Accuracy=0.589, over 5946.63 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (2/8) Epoch 9, batch 1700, train_loss[loss=3.45, NarTop10Accuracy=0.6156, over 6266.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5879, over 5921.36 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (2/8) Epoch 9, batch 1800, train_loss[loss=3.921, NarTop10Accuracy=0.5373, over 7279.00 frames. ], tot_loss[loss=3.63, NarTop10Accuracy=0.5905, over 6002.22 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,495 INFO [trainer.py:765] (2/8) Epoch 9, batch 1900, train_loss[loss=3.705, NarTop10Accuracy=0.576, over 6440.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.5883, over 6030.71 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (2/8) Epoch 9, batch 2000, train_loss[loss=3.979, NarTop10Accuracy=0.5271, over 5918.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.5872, over 6025.99 frames. ], batch size: 49, lr: 9.31e-03 +2024-08-06 08:43:01,668 INFO [trainer.py:765] (2/8) Epoch 9, batch 2100, train_loss[loss=3.623, NarTop10Accuracy=0.595, over 4950.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5881, over 5998.21 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (2/8) Epoch 9, batch 2200, train_loss[loss=3.687, NarTop10Accuracy=0.5857, over 7280.00 frames. ], tot_loss[loss=3.652, NarTop10Accuracy=0.5864, over 6036.55 frames. ], batch size: 30, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (2/8) Epoch 9, batch 2300, train_loss[loss=3.612, NarTop10Accuracy=0.5946, over 5813.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5844, over 6085.21 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (2/8) Epoch 9, batch 2400, train_loss[loss=3.693, NarTop10Accuracy=0.5705, over 5559.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5848, over 5894.93 frames. ], batch size: 48, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (2/8) Epoch 9, batch 2500, train_loss[loss=3.83, NarTop10Accuracy=0.5468, over 5101.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.5885, over 5539.45 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:04,929 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 08:46:09,064 INFO [trainer.py:765] (2/8) Epoch 10, batch 100, train_loss[loss=3.563, NarTop10Accuracy=0.6182, over 7242.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.6011, over 2374.75 frames. ], batch size: 30, lr: 8.75e-03 +2024-08-06 08:46:44,074 INFO [trainer.py:765] (2/8) Epoch 10, batch 200, train_loss[loss=3.533, NarTop10Accuracy=0.6109, over 6953.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6042, over 3857.78 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (2/8) Epoch 10, batch 300, train_loss[loss=3.586, NarTop10Accuracy=0.5991, over 7026.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6016, over 4671.98 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,119 INFO [trainer.py:765] (2/8) Epoch 10, batch 400, train_loss[loss=3.882, NarTop10Accuracy=0.5292, over 5188.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6028, over 5122.92 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,370 INFO [trainer.py:765] (2/8) Epoch 10, batch 500, train_loss[loss=3.349, NarTop10Accuracy=0.6481, over 6121.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6027, over 5390.95 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,460 INFO [trainer.py:765] (2/8) Epoch 10, batch 600, train_loss[loss=3.49, NarTop10Accuracy=0.6202, over 5772.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5993, over 5656.95 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (2/8) Epoch 10, batch 700, train_loss[loss=3.373, NarTop10Accuracy=0.6436, over 5157.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5974, over 5708.43 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,164 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (2/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 08:50:01,725 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (2/8) Epoch 10, batch 800, train_loss[loss=3.671, NarTop10Accuracy=0.5916, over 5042.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5993, over 5765.48 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,890 INFO [trainer.py:765] (2/8) Epoch 10, batch 900, train_loss[loss=3.489, NarTop10Accuracy=0.6328, over 6639.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6019, over 5795.89 frames. ], batch size: 14, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (2/8) Epoch 10, batch 1000, train_loss[loss=3.97, NarTop10Accuracy=0.5152, over 6721.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5979, over 5895.76 frames. ], batch size: 14, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (2/8) Epoch 10, batch 1100, train_loss[loss=3.48, NarTop10Accuracy=0.628, over 6862.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5956, over 5956.55 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (2/8) Epoch 10, batch 1200, train_loss[loss=3.547, NarTop10Accuracy=0.5997, over 7009.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5965, over 5942.32 frames. ], batch size: 30, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (2/8) Epoch 10, batch 1300, train_loss[loss=3.628, NarTop10Accuracy=0.5816, over 5145.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5974, over 6022.57 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,880 INFO [trainer.py:765] (2/8) Epoch 10, batch 1400, train_loss[loss=3.536, NarTop10Accuracy=0.6065, over 6188.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5953, over 6028.97 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (2/8) Epoch 10, batch 1500, train_loss[loss=3.618, NarTop10Accuracy=0.5974, over 6061.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5981, over 5978.94 frames. ], batch size: 48, lr: 8.51e-03 +2024-08-06 08:54:45,525 INFO [trainer.py:765] (2/8) Epoch 10, batch 1600, train_loss[loss=3.484, NarTop10Accuracy=0.6198, over 7233.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5971, over 5954.70 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,300 INFO [trainer.py:765] (2/8) Epoch 10, batch 1700, train_loss[loss=3.671, NarTop10Accuracy=0.575, over 6289.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5957, over 5948.92 frames. 
], batch size: 13, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (2/8) Epoch 10, batch 1800, train_loss[loss=3.341, NarTop10Accuracy=0.6467, over 7069.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5969, over 6006.74 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (2/8) Epoch 10, batch 1900, train_loss[loss=3.944, NarTop10Accuracy=0.5211, over 6472.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5971, over 6055.67 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (2/8) Epoch 10, batch 2000, train_loss[loss=3.826, NarTop10Accuracy=0.558, over 5996.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5968, over 6021.58 frames. ], batch size: 49, lr: 8.43e-03 +2024-08-06 08:56:59,752 INFO [trainer.py:765] (2/8) Epoch 10, batch 2100, train_loss[loss=3.587, NarTop10Accuracy=0.603, over 3958.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5944, over 6006.72 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (2/8) Epoch 10, batch 2200, train_loss[loss=3.609, NarTop10Accuracy=0.5894, over 6962.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5944, over 6050.54 frames. ], batch size: 30, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (2/8) Epoch 10, batch 2300, train_loss[loss=3.424, NarTop10Accuracy=0.6271, over 5820.00 frames. ], tot_loss[loss=3.619, NarTop10Accuracy=0.5933, over 6094.97 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,344 INFO [trainer.py:765] (2/8) Epoch 10, batch 2400, train_loss[loss=3.482, NarTop10Accuracy=0.6275, over 5073.00 frames. ], tot_loss[loss=3.614, NarTop10Accuracy=0.594, over 5905.63 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,809 INFO [trainer.py:765] (2/8) Epoch 10, batch 2500, train_loss[loss=3.39, NarTop10Accuracy=0.6428, over 5009.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5962, over 5557.05 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:58:59,874 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:00:03,681 INFO [trainer.py:765] (2/8) Epoch 11, batch 100, train_loss[loss=3.458, NarTop10Accuracy=0.6285, over 7118.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6121, over 2367.85 frames. ], batch size: 31, lr: 7.96e-03 +2024-08-06 09:00:30,916 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (2/8) Epoch 11, batch 200, train_loss[loss=3.817, NarTop10Accuracy=0.5524, over 6956.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6131, over 3863.44 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (2/8) Epoch 11, batch 300, train_loss[loss=3.389, NarTop10Accuracy=0.6398, over 7169.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6107, over 4665.81 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,535 INFO [trainer.py:765] (2/8) Epoch 11, batch 400, train_loss[loss=3.262, NarTop10Accuracy=0.6625, over 5136.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6098, over 5121.52 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,239 INFO [trainer.py:765] (2/8) Epoch 11, batch 500, train_loss[loss=3.277, NarTop10Accuracy=0.6612, over 6151.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6074, over 5400.76 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,742 INFO [trainer.py:765] (2/8) Epoch 11, batch 600, train_loss[loss=3.359, NarTop10Accuracy=0.6447, over 5796.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6077, over 5677.84 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,237 INFO [trainer.py:765] (2/8) Epoch 11, batch 700, train_loss[loss=3.273, NarTop10Accuracy=0.6643, over 4972.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6088, over 5756.47 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (2/8) Epoch 11, batch 800, train_loss[loss=3.138, NarTop10Accuracy=0.684, over 5108.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6048, over 5807.80 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,083 INFO [trainer.py:765] (2/8) Epoch 11, batch 900, train_loss[loss=3.507, NarTop10Accuracy=0.6112, over 6747.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6059, over 5811.79 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 09:05:27,012 INFO [trainer.py:765] (2/8) Epoch 11, batch 1000, train_loss[loss=3.455, NarTop10Accuracy=0.6259, over 6264.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6065, over 5912.02 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,351 INFO [trainer.py:765] (2/8) Epoch 11, batch 1100, train_loss[loss=3.46, NarTop10Accuracy=0.6276, over 6797.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6046, over 5952.90 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,946 INFO [trainer.py:765] (2/8) Epoch 11, batch 1200, train_loss[loss=3.477, NarTop10Accuracy=0.6156, over 7141.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6032, over 5949.23 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,494 INFO [trainer.py:765] (2/8) Epoch 11, batch 1300, train_loss[loss=3.496, NarTop10Accuracy=0.6292, over 4956.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6024, over 6026.63 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,628 INFO [trainer.py:765] (2/8) Epoch 11, batch 1400, train_loss[loss=3.313, NarTop10Accuracy=0.6637, over 6161.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6008, over 6027.04 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,987 INFO [trainer.py:765] (2/8) Epoch 11, batch 1500, train_loss[loss=3.689, NarTop10Accuracy=0.5822, over 6341.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6007, over 5980.69 frames. ], batch size: 50, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (2/8) Epoch 11, batch 1600, train_loss[loss=3.473, NarTop10Accuracy=0.6215, over 7269.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6009, over 5960.03 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (2/8) Epoch 11, batch 1700, train_loss[loss=3.527, NarTop10Accuracy=0.6207, over 6226.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6007, over 5948.97 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,732 INFO [trainer.py:765] (2/8) Epoch 11, batch 1800, train_loss[loss=3.559, NarTop10Accuracy=0.5998, over 7129.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.5996, over 6004.80 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,342 INFO [trainer.py:765] (2/8) Epoch 11, batch 1900, train_loss[loss=3.838, NarTop10Accuracy=0.549, over 5661.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5964, over 6043.18 frames. ], batch size: 49, lr: 7.70e-03 +2024-08-06 09:10:33,040 INFO [trainer.py:765] (2/8) Epoch 11, batch 2000, train_loss[loss=3.675, NarTop10Accuracy=0.5817, over 6173.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5977, over 6014.23 frames. ], batch size: 48, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (2/8) Epoch 11, batch 2100, train_loss[loss=3.569, NarTop10Accuracy=0.5966, over 4672.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6026, over 6002.53 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (2/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (2/8) Epoch 11, batch 2200, train_loss[loss=3.401, NarTop10Accuracy=0.6476, over 7240.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6037, over 6048.40 frames. ], batch size: 31, lr: 7.66e-03 +2024-08-06 09:11:59,940 INFO [trainer.py:765] (2/8) Epoch 11, batch 2300, train_loss[loss=3.584, NarTop10Accuracy=0.6, over 5680.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6014, over 6070.37 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,697 INFO [trainer.py:765] (2/8) Epoch 11, batch 2400, train_loss[loss=3.598, NarTop10Accuracy=0.5869, over 5092.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5968, over 5872.57 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (2/8) Epoch 11, batch 2500, train_loss[loss=3.669, NarTop10Accuracy=0.5828, over 5111.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6021, over 5508.74 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,161 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:14:12,278 INFO [trainer.py:765] (2/8) Epoch 12, batch 100, train_loss[loss=3.366, NarTop10Accuracy=0.6459, over 7063.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6141, over 2368.95 frames. ], batch size: 30, lr: 7.29e-03 +2024-08-06 09:14:48,095 INFO [trainer.py:765] (2/8) Epoch 12, batch 200, train_loss[loss=3.324, NarTop10Accuracy=0.6606, over 6880.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6171, over 3861.41 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (2/8) Epoch 12, batch 300, train_loss[loss=3.424, NarTop10Accuracy=0.6362, over 7211.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6174, over 4659.91 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (2/8) Epoch 12, batch 400, train_loss[loss=3.576, NarTop10Accuracy=0.5935, over 5071.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6148, over 5116.18 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (2/8) Epoch 12, batch 500, train_loss[loss=3.601, NarTop10Accuracy=0.5953, over 6140.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6157, over 5386.48 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (2/8) Epoch 12, batch 600, train_loss[loss=3.482, NarTop10Accuracy=0.6193, over 5767.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6122, over 5665.76 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,318 INFO [trainer.py:765] (2/8) Epoch 12, batch 700, train_loss[loss=3.489, NarTop10Accuracy=0.6283, over 5248.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6136, over 5732.52 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,752 INFO [trainer.py:765] (2/8) Epoch 12, batch 800, train_loss[loss=3.687, NarTop10Accuracy=0.5766, over 5155.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6125, over 5793.78 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (2/8) Epoch 12, batch 900, train_loss[loss=3.658, NarTop10Accuracy=0.5782, over 6157.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6125, over 5814.04 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (2/8) Epoch 12, batch 1000, train_loss[loss=3.594, NarTop10Accuracy=0.5955, over 6710.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6116, over 5921.37 frames. ], batch size: 14, lr: 7.18e-03 +2024-08-06 09:19:52,427 INFO [trainer.py:765] (2/8) Epoch 12, batch 1100, train_loss[loss=3.657, NarTop10Accuracy=0.5719, over 6737.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6114, over 5971.63 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,443 INFO [trainer.py:765] (2/8) Epoch 12, batch 1200, train_loss[loss=3.412, NarTop10Accuracy=0.6396, over 7476.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6096, over 5955.58 frames. ], batch size: 31, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (2/8) Epoch 12, batch 1300, train_loss[loss=3.587, NarTop10Accuracy=0.5867, over 5642.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6064, over 6034.76 frames. ], batch size: 7, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (2/8) Epoch 12, batch 1400, train_loss[loss=3.287, NarTop10Accuracy=0.6601, over 6231.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6066, over 6050.36 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,920 INFO [trainer.py:765] (2/8) Epoch 12, batch 1500, train_loss[loss=3.644, NarTop10Accuracy=0.5913, over 5132.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.607, over 5980.93 frames. ], batch size: 50, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (2/8) Epoch 12, batch 1600, train_loss[loss=3.701, NarTop10Accuracy=0.5787, over 7199.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6061, over 5957.12 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,860 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (2/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,785 INFO [trainer.py:765] (2/8) Epoch 12, batch 1700, train_loss[loss=3.404, NarTop10Accuracy=0.6353, over 6150.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6042, over 5935.15 frames. 
], batch size: 13, lr: 7.10e-03 +2024-08-06 09:23:41,387 INFO [trainer.py:765] (2/8) Epoch 12, batch 1800, train_loss[loss=3.405, NarTop10Accuracy=0.6457, over 7149.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6057, over 6002.25 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (2/8) Epoch 12, batch 1900, train_loss[loss=3.717, NarTop10Accuracy=0.5738, over 5940.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6046, over 6037.79 frames. ], batch size: 51, lr: 7.08e-03 +2024-08-06 09:24:33,619 INFO [trainer.py:765] (2/8) Epoch 12, batch 2000, train_loss[loss=3.588, NarTop10Accuracy=0.603, over 5971.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6032, over 6021.24 frames. ], batch size: 49, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (2/8) Epoch 12, batch 2100, train_loss[loss=3.876, NarTop10Accuracy=0.5524, over 3851.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6032, over 5999.01 frames. ], batch size: 4, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (2/8) Epoch 12, batch 2200, train_loss[loss=3.48, NarTop10Accuracy=0.6246, over 7322.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6032, over 6031.44 frames. ], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (2/8) Epoch 12, batch 2300, train_loss[loss=3.49, NarTop10Accuracy=0.6099, over 5727.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6027, over 6061.74 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (2/8) Epoch 12, batch 2400, train_loss[loss=3.429, NarTop10Accuracy=0.6314, over 5164.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6021, over 5867.86 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,155 INFO [trainer.py:765] (2/8) Epoch 12, batch 2500, train_loss[loss=3.609, NarTop10Accuracy=0.5971, over 5051.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6051, over 5533.08 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,555 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (2/8) Epoch 13, batch 100, train_loss[loss=3.488, NarTop10Accuracy=0.6213, over 7432.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6151, over 2377.25 frames. ], batch size: 31, lr: 6.72e-03 +2024-08-06 09:28:36,906 INFO [trainer.py:765] (2/8) Epoch 13, batch 200, train_loss[loss=3.294, NarTop10Accuracy=0.6653, over 7023.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6176, over 3872.16 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (2/8) Epoch 13, batch 300, train_loss[loss=3.259, NarTop10Accuracy=0.6654, over 7149.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6191, over 4687.26 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (2/8) Epoch 13, batch 400, train_loss[loss=3.4, NarTop10Accuracy=0.6468, over 5213.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6198, over 5136.26 frames. ], batch size: 7, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (2/8) Epoch 13, batch 500, train_loss[loss=3.729, NarTop10Accuracy=0.5782, over 6083.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6213, over 5406.09 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (2/8) Epoch 13, batch 600, train_loss[loss=3.647, NarTop10Accuracy=0.5924, over 5791.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.618, over 5684.24 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (2/8) Epoch 13, batch 700, train_loss[loss=3.538, NarTop10Accuracy=0.6114, over 4339.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6173, over 5757.31 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (2/8) Epoch 13, batch 800, train_loss[loss=3.412, NarTop10Accuracy=0.6332, over 4891.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6147, over 5801.10 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (2/8) Epoch 13, batch 900, train_loss[loss=3.369, NarTop10Accuracy=0.6429, over 6653.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6171, over 5831.44 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (2/8) Epoch 13, batch 1000, train_loss[loss=3.708, NarTop10Accuracy=0.5657, over 6838.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6143, over 5916.48 frames. ], batch size: 14, lr: 6.63e-03 +2024-08-06 09:33:14,218 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (2/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,525 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (2/8) Epoch 13, batch 1100, train_loss[loss=3.697, NarTop10Accuracy=0.5702, over 6893.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6104, over 5945.41 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (2/8) Epoch 13, batch 1200, train_loss[loss=3.54, NarTop10Accuracy=0.6103, over 7259.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6125, over 5955.78 frames. ], batch size: 31, lr: 6.61e-03 +2024-08-06 09:35:05,085 INFO [trainer.py:765] (2/8) Epoch 13, batch 1300, train_loss[loss=3.508, NarTop10Accuracy=0.6133, over 5141.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.611, over 6014.35 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,404 INFO [trainer.py:765] (2/8) Epoch 13, batch 1400, train_loss[loss=3.469, NarTop10Accuracy=0.6247, over 6157.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6083, over 6048.90 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (2/8) Epoch 13, batch 1500, train_loss[loss=3.753, NarTop10Accuracy=0.5743, over 5630.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6082, over 5971.48 frames. ], batch size: 48, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (2/8) Epoch 13, batch 1600, train_loss[loss=3.706, NarTop10Accuracy=0.5712, over 6952.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6092, over 5948.58 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (2/8) Epoch 13, batch 1700, train_loss[loss=3.468, NarTop10Accuracy=0.6204, over 6294.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6093, over 5938.61 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (2/8) Epoch 13, batch 1800, train_loss[loss=3.341, NarTop10Accuracy=0.6442, over 7260.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6097, over 6009.50 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (2/8) Epoch 13, batch 1900, train_loss[loss=3.572, NarTop10Accuracy=0.6033, over 6131.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6081, over 6037.98 frames. ], batch size: 49, lr: 6.54e-03 +2024-08-06 09:38:21,122 INFO [trainer.py:765] (2/8) Epoch 13, batch 2000, train_loss[loss=3.652, NarTop10Accuracy=0.584, over 6372.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6095, over 6022.85 frames. ], batch size: 48, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (2/8) Epoch 13, batch 2100, train_loss[loss=3.194, NarTop10Accuracy=0.6805, over 4809.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6085, over 6007.64 frames. ], batch size: 5, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (2/8) Epoch 13, batch 2200, train_loss[loss=3.688, NarTop10Accuracy=0.5899, over 7294.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6082, over 6057.59 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (2/8) Epoch 13, batch 2300, train_loss[loss=3.439, NarTop10Accuracy=0.6241, over 5786.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6088, over 6073.87 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (2/8) Epoch 13, batch 2400, train_loss[loss=3.569, NarTop10Accuracy=0.5983, over 5265.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6064, over 5901.30 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (2/8) Epoch 13, batch 2500, train_loss[loss=3.467, NarTop10Accuracy=0.6242, over 5158.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6123, over 5547.12 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:49,960 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (2/8) Epoch 14, batch 100, train_loss[loss=3.327, NarTop10Accuracy=0.6562, over 7092.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6217, over 2367.62 frames. ], batch size: 31, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (2/8) Epoch 14, batch 200, train_loss[loss=3.529, NarTop10Accuracy=0.6149, over 6985.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6233, over 3869.11 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (2/8) Epoch 14, batch 300, train_loss[loss=3.679, NarTop10Accuracy=0.5712, over 7238.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6204, over 4684.38 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (2/8) Epoch 14, batch 400, train_loss[loss=3.228, NarTop10Accuracy=0.6741, over 5085.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.62, over 5117.27 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (2/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,651 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:43:54,211 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (2/8) Epoch 14, batch 500, train_loss[loss=3.466, NarTop10Accuracy=0.629, over 6036.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6203, over 5393.29 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (2/8) Epoch 14, batch 600, train_loss[loss=3.589, NarTop10Accuracy=0.6083, over 5786.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6215, over 5660.06 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,803 INFO [trainer.py:765] (2/8) Epoch 14, batch 700, train_loss[loss=3.661, NarTop10Accuracy=0.5926, over 5142.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6219, over 5738.67 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (2/8) Epoch 14, batch 800, train_loss[loss=3.721, NarTop10Accuracy=0.571, over 5091.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6187, over 5797.30 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (2/8) Epoch 14, batch 900, train_loss[loss=3.87, NarTop10Accuracy=0.5468, over 6159.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6187, over 5818.40 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (2/8) Epoch 14, batch 1000, train_loss[loss=3.593, NarTop10Accuracy=0.5921, over 6231.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6179, over 5926.26 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (2/8) Epoch 14, batch 1100, train_loss[loss=3.127, NarTop10Accuracy=0.6966, over 6912.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6168, over 5950.14 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (2/8) Epoch 14, batch 1200, train_loss[loss=3.469, NarTop10Accuracy=0.6283, over 7391.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6175, over 5946.33 frames. ], batch size: 30, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (2/8) Epoch 14, batch 1300, train_loss[loss=3.534, NarTop10Accuracy=0.6093, over 5159.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6174, over 6030.98 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (2/8) Epoch 14, batch 1400, train_loss[loss=3.45, NarTop10Accuracy=0.6222, over 6157.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6144, over 6048.57 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (2/8) Epoch 14, batch 1500, train_loss[loss=3.62, NarTop10Accuracy=0.5939, over 5655.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6143, over 5981.77 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (2/8) Epoch 14, batch 1600, train_loss[loss=3.617, NarTop10Accuracy=0.5958, over 7199.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6147, over 5955.10 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,377 INFO [trainer.py:765] (2/8) Epoch 14, batch 1700, train_loss[loss=3.474, NarTop10Accuracy=0.6326, over 6175.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6151, over 5937.72 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,993 INFO [trainer.py:765] (2/8) Epoch 14, batch 1800, train_loss[loss=3.515, NarTop10Accuracy=0.6182, over 6925.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6173, over 6002.88 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (2/8) Epoch 14, batch 1900, train_loss[loss=3.75, NarTop10Accuracy=0.5681, over 5999.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6134, over 6044.88 frames. 
], batch size: 49, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (2/8) Epoch 14, batch 2000, train_loss[loss=3.598, NarTop10Accuracy=0.5972, over 5995.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6128, over 6012.19 frames. ], batch size: 49, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (2/8) Epoch 14, batch 2100, train_loss[loss=3.632, NarTop10Accuracy=0.5828, over 4845.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.614, over 6000.19 frames. ], batch size: 5, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (2/8) Epoch 14, batch 2200, train_loss[loss=3.521, NarTop10Accuracy=0.6149, over 7339.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6133, over 6035.21 frames. ], batch size: 31, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (2/8) Epoch 14, batch 2300, train_loss[loss=3.589, NarTop10Accuracy=0.6032, over 5692.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6099, over 6062.40 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (2/8) Epoch 14, batch 2400, train_loss[loss=3.564, NarTop10Accuracy=0.6075, over 5042.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6098, over 5884.31 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (2/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (2/8) Epoch 14, batch 2500, train_loss[loss=3.637, NarTop10Accuracy=0.5807, over 4830.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6156, over 5520.66 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,736 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (2/8) Epoch 15, batch 100, train_loss[loss=3.499, NarTop10Accuracy=0.6164, over 7741.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6276, over 2389.46 frames. ], batch size: 32, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (2/8) Epoch 15, batch 200, train_loss[loss=3.399, NarTop10Accuracy=0.652, over 6832.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6292, over 3878.33 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,653 INFO [trainer.py:765] (2/8) Epoch 15, batch 300, train_loss[loss=3.417, NarTop10Accuracy=0.6433, over 7143.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6283, over 4675.97 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,463 INFO [trainer.py:765] (2/8) Epoch 15, batch 400, train_loss[loss=3.757, NarTop10Accuracy=0.5788, over 5165.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6261, over 5117.47 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,234 INFO [trainer.py:765] (2/8) Epoch 15, batch 500, train_loss[loss=3.549, NarTop10Accuracy=0.6074, over 6140.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6236, over 5395.33 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,542 INFO [trainer.py:765] (2/8) Epoch 15, batch 600, train_loss[loss=3.616, NarTop10Accuracy=0.5985, over 5835.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6241, over 5671.63 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,061 INFO [trainer.py:765] (2/8) Epoch 15, batch 700, train_loss[loss=3.147, NarTop10Accuracy=0.6805, over 4927.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.621, over 5756.27 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,587 INFO [trainer.py:765] (2/8) Epoch 15, batch 800, train_loss[loss=3.635, NarTop10Accuracy=0.5886, over 5045.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6197, over 5821.30 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,023 INFO [trainer.py:765] (2/8) Epoch 15, batch 900, train_loss[loss=3.473, NarTop10Accuracy=0.6212, over 6373.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6216, over 5825.97 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 10:01:05,538 INFO [trainer.py:765] (2/8) Epoch 15, batch 1000, train_loss[loss=3.307, NarTop10Accuracy=0.6666, over 6147.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6221, over 5938.10 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,153 INFO [trainer.py:765] (2/8) Epoch 15, batch 1100, train_loss[loss=3.573, NarTop10Accuracy=0.6047, over 6795.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6178, over 5969.56 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,756 INFO [trainer.py:765] (2/8) Epoch 15, batch 1200, train_loss[loss=3.669, NarTop10Accuracy=0.577, over 7322.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6207, over 5967.35 frames. ], batch size: 30, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (2/8) Epoch 15, batch 1300, train_loss[loss=3.467, NarTop10Accuracy=0.6354, over 4987.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6202, over 6026.33 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,435 INFO [trainer.py:765] (2/8) Epoch 15, batch 1400, train_loss[loss=3.623, NarTop10Accuracy=0.5863, over 6142.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6184, over 6034.37 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,041 INFO [trainer.py:765] (2/8) Epoch 15, batch 1500, train_loss[loss=3.631, NarTop10Accuracy=0.6024, over 5535.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6177, over 5968.38 frames. ], batch size: 49, lr: 5.71e-03 +2024-08-06 10:04:27,105 INFO [trainer.py:765] (2/8) Epoch 15, batch 1600, train_loss[loss=3.699, NarTop10Accuracy=0.5697, over 7124.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6197, over 5950.09 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (2/8) Epoch 15, batch 1700, train_loss[loss=3.799, NarTop10Accuracy=0.5555, over 6126.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6196, over 5929.50 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,728 INFO [trainer.py:765] (2/8) Epoch 15, batch 1800, train_loss[loss=3.839, NarTop10Accuracy=0.5463, over 7126.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6159, over 6004.83 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,266 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (2/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,568 INFO [trainer.py:765] (2/8) Epoch 15, batch 1900, train_loss[loss=3.702, NarTop10Accuracy=0.5844, over 6524.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6142, over 6042.66 frames. ], batch size: 49, lr: 5.68e-03 +2024-08-06 10:06:23,370 INFO [trainer.py:765] (2/8) Epoch 15, batch 2000, train_loss[loss=3.598, NarTop10Accuracy=0.596, over 6414.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6165, over 6020.63 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,757 INFO [trainer.py:765] (2/8) Epoch 15, batch 2100, train_loss[loss=3.33, NarTop10Accuracy=0.6635, over 3922.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6148, over 5999.16 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,170 INFO [trainer.py:765] (2/8) Epoch 15, batch 2200, train_loss[loss=3.272, NarTop10Accuracy=0.6631, over 7166.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6162, over 6027.62 frames. ], batch size: 31, lr: 5.65e-03 +2024-08-06 10:07:39,627 INFO [trainer.py:765] (2/8) Epoch 15, batch 2300, train_loss[loss=3.213, NarTop10Accuracy=0.6634, over 5795.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6142, over 6055.12 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,360 INFO [trainer.py:765] (2/8) Epoch 15, batch 2400, train_loss[loss=3.52, NarTop10Accuracy=0.6199, over 5177.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6142, over 5887.00 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,712 INFO [trainer.py:765] (2/8) Epoch 15, batch 2500, train_loss[loss=3.666, NarTop10Accuracy=0.5898, over 5014.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.617, over 5542.45 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:48,611 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (2/8) Epoch 16, batch 100, train_loss[loss=3.574, NarTop10Accuracy=0.6054, over 7089.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6268, over 2369.65 frames. ], batch size: 30, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (2/8) Epoch 16, batch 200, train_loss[loss=3.329, NarTop10Accuracy=0.654, over 6856.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6307, over 3856.32 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (2/8) Epoch 16, batch 300, train_loss[loss=3.182, NarTop10Accuracy=0.6752, over 7182.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6285, over 4670.13 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,594 INFO [trainer.py:765] (2/8) Epoch 16, batch 400, train_loss[loss=3.429, NarTop10Accuracy=0.638, over 5127.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6276, over 5117.71 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,297 INFO [trainer.py:765] (2/8) Epoch 16, batch 500, train_loss[loss=3.875, NarTop10Accuracy=0.5405, over 6152.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6272, over 5402.89 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (2/8) Epoch 16, batch 600, train_loss[loss=3.394, NarTop10Accuracy=0.6406, over 5769.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.628, over 5687.58 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,950 INFO [trainer.py:765] (2/8) Epoch 16, batch 700, train_loss[loss=3.11, NarTop10Accuracy=0.6904, over 5157.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6253, over 5736.98 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,285 INFO [trainer.py:765] (2/8) Epoch 16, batch 800, train_loss[loss=3.34, NarTop10Accuracy=0.6404, over 5040.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6253, over 5806.04 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,295 INFO [trainer.py:765] (2/8) Epoch 16, batch 900, train_loss[loss=3.539, NarTop10Accuracy=0.6131, over 6243.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6266, over 5819.55 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,058 INFO [trainer.py:765] (2/8) Epoch 16, batch 1000, train_loss[loss=3.779, NarTop10Accuracy=0.5766, over 6203.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6233, over 5907.73 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,509 INFO [trainer.py:765] (2/8) Epoch 16, batch 1100, train_loss[loss=3.409, NarTop10Accuracy=0.6327, over 6858.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6216, over 5931.67 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,384 INFO [trainer.py:765] (2/8) Epoch 16, batch 1200, train_loss[loss=3.623, NarTop10Accuracy=0.5978, over 7349.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6226, over 5943.48 frames. ], batch size: 30, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (2/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,041 INFO [trainer.py:765] (2/8) Epoch 16, batch 1300, train_loss[loss=3.631, NarTop10Accuracy=0.5825, over 5168.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6222, over 6011.22 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,375 INFO [trainer.py:765] (2/8) Epoch 16, batch 1400, train_loss[loss=3.476, NarTop10Accuracy=0.622, over 6075.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6228, over 6026.16 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,354 INFO [trainer.py:765] (2/8) Epoch 16, batch 1500, train_loss[loss=3.628, NarTop10Accuracy=0.599, over 6162.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6216, over 5972.57 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 10:18:30,469 INFO [trainer.py:765] (2/8) Epoch 16, batch 1600, train_loss[loss=3.693, NarTop10Accuracy=0.5733, over 7148.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6192, over 5958.47 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,272 INFO [trainer.py:765] (2/8) Epoch 16, batch 1700, train_loss[loss=3.77, NarTop10Accuracy=0.5659, over 6120.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6227, over 5934.62 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,979 INFO [trainer.py:765] (2/8) Epoch 16, batch 1800, train_loss[loss=3.717, NarTop10Accuracy=0.5644, over 7345.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6208, over 5993.48 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,773 INFO [trainer.py:765] (2/8) Epoch 16, batch 1900, train_loss[loss=3.674, NarTop10Accuracy=0.5885, over 6288.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6203, over 6047.24 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:16,601 INFO [trainer.py:765] (2/8) Epoch 16, batch 2000, train_loss[loss=3.496, NarTop10Accuracy=0.6244, over 6264.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6188, over 6008.33 frames. ], batch size: 48, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (2/8) Epoch 16, batch 2100, train_loss[loss=3.415, NarTop10Accuracy=0.6179, over 4786.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6162, over 6022.75 frames. ], batch size: 5, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (2/8) Epoch 16, batch 2200, train_loss[loss=3.471, NarTop10Accuracy=0.6302, over 7198.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6174, over 6058.97 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:36,082 INFO [trainer.py:765] (2/8) Epoch 16, batch 2300, train_loss[loss=3.464, NarTop10Accuracy=0.6176, over 6332.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6148, over 6087.05 frames. ], batch size: 10, lr: 5.30e-03 +2024-08-06 10:22:00,906 INFO [trainer.py:765] (2/8) Epoch 16, batch 2400, train_loss[loss=3.34, NarTop10Accuracy=0.6478, over 5130.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.616, over 5896.34 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,290 INFO [trainer.py:765] (2/8) Epoch 16, batch 2500, train_loss[loss=3.234, NarTop10Accuracy=0.6634, over 5087.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6219, over 5548.25 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,798 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:23:45,727 INFO [trainer.py:765] (2/8) Epoch 17, batch 100, train_loss[loss=3.413, NarTop10Accuracy=0.6383, over 7175.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6314, over 2378.15 frames. ], batch size: 30, lr: 5.12e-03 +2024-08-06 10:24:19,033 INFO [trainer.py:765] (2/8) Epoch 17, batch 200, train_loss[loss=3.27, NarTop10Accuracy=0.6692, over 6843.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6319, over 3876.57 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,441 INFO [trainer.py:765] (2/8) Epoch 17, batch 300, train_loss[loss=3.615, NarTop10Accuracy=0.5966, over 7172.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6308, over 4670.37 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (2/8) Epoch 17, batch 400, train_loss[loss=3.714, NarTop10Accuracy=0.5783, over 5097.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6308, over 5123.69 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (2/8) Epoch 17, batch 500, train_loss[loss=3.472, NarTop10Accuracy=0.6202, over 6278.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6315, over 5403.91 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,755 INFO [trainer.py:765] (2/8) Epoch 17, batch 600, train_loss[loss=3.746, NarTop10Accuracy=0.5649, over 5764.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6317, over 5673.55 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (2/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,071 INFO [trainer.py:765] (2/8) Epoch 17, batch 700, train_loss[loss=3.186, NarTop10Accuracy=0.6898, over 4880.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6288, over 5758.52 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,840 INFO [trainer.py:765] (2/8) Epoch 17, batch 800, train_loss[loss=3.141, NarTop10Accuracy=0.6822, over 5111.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6294, over 5811.27 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,837 INFO [trainer.py:765] (2/8) Epoch 17, batch 900, train_loss[loss=3.273, NarTop10Accuracy=0.6632, over 6155.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.629, over 5832.32 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,683 INFO [trainer.py:765] (2/8) Epoch 17, batch 1000, train_loss[loss=3.298, NarTop10Accuracy=0.6606, over 6289.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6282, over 5922.84 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,658 INFO [trainer.py:765] (2/8) Epoch 17, batch 1100, train_loss[loss=3.253, NarTop10Accuracy=0.672, over 7016.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6269, over 5969.31 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,241 INFO [trainer.py:765] (2/8) Epoch 17, batch 1200, train_loss[loss=3.446, NarTop10Accuracy=0.626, over 7404.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6267, over 5938.61 frames. ], batch size: 31, lr: 5.05e-03 +2024-08-06 10:30:47,101 INFO [trainer.py:765] (2/8) Epoch 17, batch 1300, train_loss[loss=3.378, NarTop10Accuracy=0.6546, over 5174.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6246, over 5988.31 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,892 INFO [trainer.py:765] (2/8) Epoch 17, batch 1400, train_loss[loss=3.44, NarTop10Accuracy=0.6272, over 6279.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6235, over 6022.91 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,400 INFO [trainer.py:765] (2/8) Epoch 17, batch 1500, train_loss[loss=3.485, NarTop10Accuracy=0.62, over 5942.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.625, over 5976.83 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,400 INFO [trainer.py:765] (2/8) Epoch 17, batch 1600, train_loss[loss=3.541, NarTop10Accuracy=0.617, over 7170.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6228, over 5957.33 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,393 INFO [trainer.py:765] (2/8) Epoch 17, batch 1700, train_loss[loss=3.751, NarTop10Accuracy=0.565, over 6303.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6198, over 5948.33 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (2/8) Epoch 17, batch 1800, train_loss[loss=3.714, NarTop10Accuracy=0.57, over 7017.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6183, over 5998.58 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,596 INFO [trainer.py:765] (2/8) Epoch 17, batch 1900, train_loss[loss=3.747, NarTop10Accuracy=0.5648, over 5815.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6183, over 6029.03 frames. 
], batch size: 48, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (2/8) Epoch 17, batch 2000, train_loss[loss=3.79, NarTop10Accuracy=0.5613, over 6261.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6201, over 6019.39 frames. ], batch size: 50, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (2/8) Epoch 17, batch 2100, train_loss[loss=3.468, NarTop10Accuracy=0.6209, over 4036.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6168, over 6001.56 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,244 INFO [trainer.py:765] (2/8) Epoch 17, batch 2200, train_loss[loss=3.349, NarTop10Accuracy=0.6549, over 6938.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6222, over 6024.57 frames. ], batch size: 30, lr: 4.99e-03 +2024-08-06 10:35:25,732 INFO [trainer.py:765] (2/8) Epoch 17, batch 2300, train_loss[loss=3.291, NarTop10Accuracy=0.6594, over 5766.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6198, over 6051.22 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (2/8) Epoch 17, batch 2400, train_loss[loss=3.441, NarTop10Accuracy=0.6311, over 5167.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.618, over 5856.43 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,104 INFO [trainer.py:765] (2/8) Epoch 17, batch 2500, train_loss[loss=3.394, NarTop10Accuracy=0.6412, over 5060.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6222, over 5497.09 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,816 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:37:32,051 INFO [trainer.py:765] (2/8) Epoch 18, batch 100, train_loss[loss=3.277, NarTop10Accuracy=0.6607, over 7303.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6358, over 2356.53 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 10:37:39,162 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,085 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:37:49,684 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (2/8) Epoch 18, batch 200, train_loss[loss=3.507, NarTop10Accuracy=0.6136, over 6988.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6338, over 3869.48 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,199 INFO [trainer.py:765] (2/8) Epoch 18, batch 300, train_loss[loss=3.431, NarTop10Accuracy=0.6337, over 7207.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.635, over 4660.99 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (2/8) Epoch 18, batch 400, train_loss[loss=3.176, NarTop10Accuracy=0.6675, over 5145.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6342, over 5115.31 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (2/8) Epoch 18, batch 500, train_loss[loss=3.493, NarTop10Accuracy=0.6304, over 6095.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.636, over 5380.66 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (2/8) Epoch 18, batch 600, train_loss[loss=3.491, NarTop10Accuracy=0.6285, over 5677.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6341, over 5658.33 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (2/8) Epoch 18, batch 700, train_loss[loss=3.277, NarTop10Accuracy=0.6585, over 5040.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6315, over 5730.16 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (2/8) Epoch 18, batch 800, train_loss[loss=3.695, NarTop10Accuracy=0.5851, over 4993.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.631, over 5775.87 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (2/8) Epoch 18, batch 900, train_loss[loss=3.412, NarTop10Accuracy=0.6468, over 6252.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6307, over 5805.01 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,703 INFO [trainer.py:765] (2/8) Epoch 18, batch 1000, train_loss[loss=3.196, NarTop10Accuracy=0.6804, over 6258.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6315, over 5913.02 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (2/8) Epoch 18, batch 1100, train_loss[loss=3.598, NarTop10Accuracy=0.5903, over 6902.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6263, over 5961.47 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (2/8) Epoch 18, batch 1200, train_loss[loss=3.382, NarTop10Accuracy=0.6384, over 7014.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.626, over 5944.46 frames. ], batch size: 30, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (2/8) Epoch 18, batch 1300, train_loss[loss=3.287, NarTop10Accuracy=0.6678, over 5029.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6276, over 6009.37 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,237 INFO [trainer.py:765] (2/8) Epoch 18, batch 1400, train_loss[loss=3.433, NarTop10Accuracy=0.634, over 6183.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6267, over 6038.63 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,975 INFO [trainer.py:765] (2/8) Epoch 18, batch 1500, train_loss[loss=3.9, NarTop10Accuracy=0.5417, over 5425.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6259, over 5974.90 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (2/8) Epoch 18, batch 1600, train_loss[loss=3.354, NarTop10Accuracy=0.654, over 7117.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6261, over 5944.31 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,858 INFO [trainer.py:765] (2/8) Epoch 18, batch 1700, train_loss[loss=3.841, NarTop10Accuracy=0.5518, over 6587.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6243, over 5944.73 frames. ], batch size: 14, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (2/8) Epoch 18, batch 1800, train_loss[loss=3.394, NarTop10Accuracy=0.6374, over 7114.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6231, over 6002.24 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (2/8) Epoch 18, batch 1900, train_loss[loss=3.777, NarTop10Accuracy=0.555, over 5619.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.621, over 6030.46 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (2/8) Epoch 18, batch 2000, train_loss[loss=3.504, NarTop10Accuracy=0.6192, over 6074.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6215, over 6019.76 frames. 
], batch size: 48, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (2/8) Epoch 18, batch 2100, train_loss[loss=3.429, NarTop10Accuracy=0.6396, over 4804.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6237, over 6008.88 frames. ], batch size: 5, lr: 4.72e-03 +2024-08-06 10:48:24,747 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (2/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:48:35,534 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,095 INFO [trainer.py:765] (2/8) Epoch 18, batch 2200, train_loss[loss=3.382, NarTop10Accuracy=0.6449, over 7213.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6216, over 6038.84 frames. ], batch size: 30, lr: 4.72e-03 +2024-08-06 10:49:21,520 INFO [trainer.py:765] (2/8) Epoch 18, batch 2300, train_loss[loss=3.441, NarTop10Accuracy=0.6315, over 5789.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6213, over 6051.19 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (2/8) Epoch 18, batch 2400, train_loss[loss=3.305, NarTop10Accuracy=0.6743, over 5069.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6212, over 5890.47 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,707 INFO [trainer.py:765] (2/8) Epoch 18, batch 2500, train_loss[loss=3.125, NarTop10Accuracy=0.6748, over 5055.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6254, over 5551.36 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:31,098 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (2/8) Epoch 19, batch 100, train_loss[loss=3.17, NarTop10Accuracy=0.6785, over 7273.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6338, over 2368.08 frames. ], batch size: 31, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (2/8) Epoch 19, batch 200, train_loss[loss=3.684, NarTop10Accuracy=0.5737, over 6792.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6356, over 3871.72 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (2/8) Epoch 19, batch 300, train_loss[loss=3.46, NarTop10Accuracy=0.6321, over 7254.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6372, over 4664.96 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,829 INFO [trainer.py:765] (2/8) Epoch 19, batch 400, train_loss[loss=3.294, NarTop10Accuracy=0.6682, over 5195.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6355, over 5118.05 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (2/8) Epoch 19, batch 500, train_loss[loss=3.413, NarTop10Accuracy=0.6343, over 6130.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6347, over 5406.05 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,601 INFO [trainer.py:765] (2/8) Epoch 19, batch 600, train_loss[loss=3.086, NarTop10Accuracy=0.6936, over 5806.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6361, over 5671.93 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (2/8) Epoch 19, batch 700, train_loss[loss=3.475, NarTop10Accuracy=0.6264, over 4302.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6336, over 5744.05 frames. 
], batch size: 5, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (2/8) Epoch 19, batch 800, train_loss[loss=3.456, NarTop10Accuracy=0.6124, over 5060.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6328, over 5807.71 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,239 INFO [trainer.py:765] (2/8) Epoch 19, batch 900, train_loss[loss=3.586, NarTop10Accuracy=0.6024, over 6247.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6327, over 5825.62 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (2/8) Epoch 19, batch 1000, train_loss[loss=3.195, NarTop10Accuracy=0.6742, over 6250.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6305, over 5925.72 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,188 INFO [trainer.py:765] (2/8) Epoch 19, batch 1100, train_loss[loss=3.32, NarTop10Accuracy=0.6579, over 6890.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6284, over 5953.46 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (2/8) Epoch 19, batch 1200, train_loss[loss=3.472, NarTop10Accuracy=0.629, over 7268.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6283, over 5941.05 frames. ], batch size: 31, lr: 4.51e-03 +2024-08-06 10:58:23,901 INFO [trainer.py:765] (2/8) Epoch 19, batch 1300, train_loss[loss=2.952, NarTop10Accuracy=0.7136, over 5152.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6287, over 6018.07 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (2/8) Epoch 19, batch 1400, train_loss[loss=3.49, NarTop10Accuracy=0.6099, over 6161.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6275, over 6029.07 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (2/8) Epoch 19, batch 1500, train_loss[loss=3.663, NarTop10Accuracy=0.5876, over 5858.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6294, over 5961.63 frames. ], batch size: 48, lr: 4.50e-03 +2024-08-06 10:59:40,831 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (2/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (2/8) Epoch 19, batch 1600, train_loss[loss=3.796, NarTop10Accuracy=0.556, over 7126.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6305, over 5932.43 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (2/8) Epoch 19, batch 1700, train_loss[loss=3.607, NarTop10Accuracy=0.5985, over 6678.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6261, over 5930.44 frames. ], batch size: 14, lr: 4.49e-03 +2024-08-06 11:01:02,257 INFO [trainer.py:765] (2/8) Epoch 19, batch 1800, train_loss[loss=3.24, NarTop10Accuracy=0.6662, over 6882.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6285, over 5997.93 frames. ], batch size: 21, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (2/8) Epoch 19, batch 1900, train_loss[loss=3.667, NarTop10Accuracy=0.5806, over 6105.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6254, over 6034.67 frames. 
], batch size: 48, lr: 4.48e-03 +2024-08-06 11:01:54,632 INFO [trainer.py:765] (2/8) Epoch 19, batch 2000, train_loss[loss=3.594, NarTop10Accuracy=0.6056, over 6183.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6265, over 6005.32 frames. ], batch size: 49, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (2/8) Epoch 19, batch 2100, train_loss[loss=3.12, NarTop10Accuracy=0.6757, over 3959.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.627, over 5991.03 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,694 INFO [trainer.py:765] (2/8) Epoch 19, batch 2200, train_loss[loss=3.39, NarTop10Accuracy=0.6427, over 7197.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6258, over 6033.25 frames. ], batch size: 30, lr: 4.47e-03 +2024-08-06 11:03:11,130 INFO [trainer.py:765] (2/8) Epoch 19, batch 2300, train_loss[loss=3.135, NarTop10Accuracy=0.6885, over 5841.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6248, over 6062.85 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,950 INFO [trainer.py:765] (2/8) Epoch 19, batch 2400, train_loss[loss=3.687, NarTop10Accuracy=0.5747, over 6436.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6222, over 5882.03 frames. ], batch size: 51, lr: 4.46e-03 +2024-08-06 11:03:59,405 INFO [trainer.py:765] (2/8) Epoch 19, batch 2500, train_loss[loss=3.306, NarTop10Accuracy=0.6423, over 5077.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6308, over 5529.68 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:23,889 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:05:26,561 INFO [trainer.py:765] (2/8) Epoch 20, batch 100, train_loss[loss=3.483, NarTop10Accuracy=0.6146, over 7273.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6381, over 2361.81 frames. ], batch size: 31, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (2/8) Epoch 20, batch 200, train_loss[loss=3.292, NarTop10Accuracy=0.6638, over 6896.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6397, over 3870.23 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (2/8) Epoch 20, batch 300, train_loss[loss=3.359, NarTop10Accuracy=0.6427, over 6953.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6406, over 4682.01 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (2/8) Epoch 20, batch 400, train_loss[loss=3.305, NarTop10Accuracy=0.6679, over 4996.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6378, over 5134.81 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (2/8) Epoch 20, batch 500, train_loss[loss=3.389, NarTop10Accuracy=0.6441, over 6187.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6393, over 5402.48 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (2/8) Epoch 20, batch 600, train_loss[loss=3.089, NarTop10Accuracy=0.7014, over 5861.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6402, over 5680.30 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,274 INFO [trainer.py:765] (2/8) Epoch 20, batch 700, train_loss[loss=3.339, NarTop10Accuracy=0.6402, over 5092.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.637, over 5742.33 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (2/8) Epoch 20, batch 800, train_loss[loss=3.456, NarTop10Accuracy=0.629, over 5024.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6363, over 5815.75 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (2/8) Epoch 20, batch 900, train_loss[loss=3.341, NarTop10Accuracy=0.6593, over 6193.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6333, over 5827.44 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,199 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (2/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,965 INFO [trainer.py:765] (2/8) Epoch 20, batch 1000, train_loss[loss=3.127, NarTop10Accuracy=0.6897, over 6149.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6348, over 5913.91 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,022 INFO [trainer.py:765] (2/8) Epoch 20, batch 1100, train_loss[loss=3.516, NarTop10Accuracy=0.6122, over 6786.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6314, over 5950.28 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,394 INFO [trainer.py:765] (2/8) Epoch 20, batch 1200, train_loss[loss=3.349, NarTop10Accuracy=0.649, over 6883.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 5926.55 frames. ], batch size: 30, lr: 4.28e-03 +2024-08-06 11:12:30,752 INFO [trainer.py:765] (2/8) Epoch 20, batch 1300, train_loss[loss=3.756, NarTop10Accuracy=0.5743, over 5033.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6324, over 6020.37 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,292 INFO [trainer.py:765] (2/8) Epoch 20, batch 1400, train_loss[loss=3.715, NarTop10Accuracy=0.5558, over 6091.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6311, over 6039.18 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,989 INFO [trainer.py:765] (2/8) Epoch 20, batch 1500, train_loss[loss=3.53, NarTop10Accuracy=0.6144, over 5685.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6297, over 5978.39 frames. ], batch size: 48, lr: 4.27e-03 +2024-08-06 11:14:07,052 INFO [trainer.py:765] (2/8) Epoch 20, batch 1600, train_loss[loss=3.391, NarTop10Accuracy=0.6456, over 7052.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6299, over 5971.61 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,911 INFO [trainer.py:765] (2/8) Epoch 20, batch 1700, train_loss[loss=3.66, NarTop10Accuracy=0.5888, over 6238.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6288, over 5952.76 frames. ], batch size: 13, lr: 4.26e-03 +2024-08-06 11:15:00,590 INFO [trainer.py:765] (2/8) Epoch 20, batch 1800, train_loss[loss=3.314, NarTop10Accuracy=0.6584, over 7166.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6274, over 6027.51 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,277 INFO [trainer.py:765] (2/8) Epoch 20, batch 1900, train_loss[loss=3.487, NarTop10Accuracy=0.6164, over 6181.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6256, over 6060.12 frames. ], batch size: 48, lr: 4.26e-03 +2024-08-06 11:15:56,439 INFO [trainer.py:765] (2/8) Epoch 20, batch 2000, train_loss[loss=3.619, NarTop10Accuracy=0.6008, over 5935.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6268, over 6027.92 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,958 INFO [trainer.py:765] (2/8) Epoch 20, batch 2100, train_loss[loss=3.313, NarTop10Accuracy=0.6574, over 4870.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6255, over 6011.90 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 11:16:47,405 INFO [trainer.py:765] (2/8) Epoch 20, batch 2200, train_loss[loss=3.305, NarTop10Accuracy=0.6582, over 7259.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6256, over 6050.01 frames. ], batch size: 31, lr: 4.24e-03 +2024-08-06 11:17:12,908 INFO [trainer.py:765] (2/8) Epoch 20, batch 2300, train_loss[loss=3.618, NarTop10Accuracy=0.6008, over 5823.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6239, over 6080.71 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,714 INFO [trainer.py:765] (2/8) Epoch 20, batch 2400, train_loss[loss=3.233, NarTop10Accuracy=0.6796, over 5080.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6249, over 5898.64 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,248 INFO [trainer.py:765] (2/8) Epoch 20, batch 2500, train_loss[loss=3.639, NarTop10Accuracy=0.5952, over 5110.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.63, over 5554.29 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,170 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:19:21,460 INFO [trainer.py:765] (2/8) Epoch 21, batch 100, train_loss[loss=3.326, NarTop10Accuracy=0.6534, over 7573.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6369, over 2360.90 frames. ], batch size: 30, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (2/8) Epoch 21, batch 200, train_loss[loss=3.417, NarTop10Accuracy=0.6308, over 6921.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6354, over 3856.93 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (2/8) Epoch 21, batch 300, train_loss[loss=3.644, NarTop10Accuracy=0.5882, over 6904.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6402, over 4664.97 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (2/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (2/8) Epoch 21, batch 400, train_loss[loss=3.356, NarTop10Accuracy=0.6466, over 5249.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6395, over 5117.02 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (2/8) Epoch 21, batch 500, train_loss[loss=3.327, NarTop10Accuracy=0.6547, over 6112.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6426, over 5419.27 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,237 INFO [trainer.py:765] (2/8) Epoch 21, batch 600, train_loss[loss=3.541, NarTop10Accuracy=0.616, over 5623.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6409, over 5681.48 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,843 INFO [trainer.py:765] (2/8) Epoch 21, batch 700, train_loss[loss=3.293, NarTop10Accuracy=0.6675, over 5136.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6389, over 5733.73 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,075 INFO [trainer.py:765] (2/8) Epoch 21, batch 800, train_loss[loss=3.344, NarTop10Accuracy=0.6525, over 5123.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 5795.15 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (2/8) Epoch 21, batch 900, train_loss[loss=3.848, NarTop10Accuracy=0.5495, over 6300.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6352, over 5816.73 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (2/8) Epoch 21, batch 1000, train_loss[loss=3.387, NarTop10Accuracy=0.6321, over 6854.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.634, over 5917.83 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 11:25:16,428 INFO [trainer.py:765] (2/8) Epoch 21, batch 1100, train_loss[loss=3.683, NarTop10Accuracy=0.5853, over 7008.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6322, over 5958.59 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,739 INFO [trainer.py:765] (2/8) Epoch 21, batch 1200, train_loss[loss=3.373, NarTop10Accuracy=0.6459, over 7342.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6338, over 5960.70 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,057 INFO [trainer.py:765] (2/8) Epoch 21, batch 1300, train_loss[loss=3.427, NarTop10Accuracy=0.628, over 5089.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6346, over 6033.94 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,081 INFO [trainer.py:765] (2/8) Epoch 21, batch 1400, train_loss[loss=3.231, NarTop10Accuracy=0.6723, over 6326.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6326, over 6033.50 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (2/8) Epoch 21, batch 1500, train_loss[loss=3.905, NarTop10Accuracy=0.5303, over 5901.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6332, over 5959.36 frames. ], batch size: 48, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (2/8) Epoch 21, batch 1600, train_loss[loss=3.218, NarTop10Accuracy=0.6722, over 7179.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6315, over 5951.03 frames. ], batch size: 23, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (2/8) Epoch 21, batch 1700, train_loss[loss=3.799, NarTop10Accuracy=0.5548, over 6283.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6297, over 5938.44 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (2/8) Epoch 21, batch 1800, train_loss[loss=3.448, NarTop10Accuracy=0.6192, over 7134.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6294, over 6003.05 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (2/8) Epoch 21, batch 1900, train_loss[loss=3.475, NarTop10Accuracy=0.6237, over 5795.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6282, over 6037.53 frames. ], batch size: 48, lr: 4.05e-03 +2024-08-06 11:29:49,029 INFO [trainer.py:765] (2/8) Epoch 21, batch 2000, train_loss[loss=3.49, NarTop10Accuracy=0.621, over 5857.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6283, over 6007.17 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:30:14,529 INFO [trainer.py:765] (2/8) Epoch 21, batch 2100, train_loss[loss=3.07, NarTop10Accuracy=0.703, over 3910.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6294, over 5998.51 frames. 
], batch size: 4, lr: 4.04e-03 +2024-08-06 11:30:39,871 INFO [trainer.py:765] (2/8) Epoch 21, batch 2200, train_loss[loss=3.595, NarTop10Accuracy=0.5954, over 7217.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6276, over 6047.19 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (2/8) Epoch 21, batch 2300, train_loss[loss=3.382, NarTop10Accuracy=0.6481, over 5812.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6271, over 6082.88 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,873 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (2/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,440 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,754 INFO [trainer.py:765] (2/8) Epoch 21, batch 2400, train_loss[loss=3.119, NarTop10Accuracy=0.7063, over 4966.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6271, over 5880.50 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,056 INFO [trainer.py:765] (2/8) Epoch 21, batch 2500, train_loss[loss=3.161, NarTop10Accuracy=0.6781, over 5097.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6328, over 5539.73 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,911 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (2/8) Epoch 22, batch 100, train_loss[loss=3.586, NarTop10Accuracy=0.5968, over 7139.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6415, over 2374.27 frames. ], batch size: 30, lr: 3.93e-03 +2024-08-06 11:34:05,037 INFO [trainer.py:765] (2/8) Epoch 22, batch 200, train_loss[loss=3.297, NarTop10Accuracy=0.6658, over 6812.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6446, over 3863.29 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (2/8) Epoch 22, batch 300, train_loss[loss=3.242, NarTop10Accuracy=0.6787, over 7052.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6428, over 4673.89 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (2/8) Epoch 22, batch 400, train_loss[loss=3.326, NarTop10Accuracy=0.6483, over 5201.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6419, over 5126.36 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (2/8) Epoch 22, batch 500, train_loss[loss=3.414, NarTop10Accuracy=0.6422, over 6120.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6409, over 5405.78 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (2/8) Epoch 22, batch 600, train_loss[loss=3.027, NarTop10Accuracy=0.7244, over 5889.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6406, over 5669.20 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (2/8) Epoch 22, batch 700, train_loss[loss=3.169, NarTop10Accuracy=0.6879, over 4127.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6403, over 5734.47 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (2/8) Epoch 22, batch 800, train_loss[loss=3.312, NarTop10Accuracy=0.6507, over 4960.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6375, over 5789.79 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (2/8) Epoch 22, batch 900, train_loss[loss=3.48, NarTop10Accuracy=0.6238, over 6219.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6379, over 5832.54 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (2/8) Epoch 22, batch 1000, train_loss[loss=3.283, NarTop10Accuracy=0.6614, over 6201.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6392, over 5927.06 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (2/8) Epoch 22, batch 1100, train_loss[loss=3.294, NarTop10Accuracy=0.6495, over 6873.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6374, over 5937.48 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (2/8) Epoch 22, batch 1200, train_loss[loss=3.356, NarTop10Accuracy=0.6393, over 7038.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6357, over 5932.60 frames. ], batch size: 30, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (2/8) Epoch 22, batch 1300, train_loss[loss=3.554, NarTop10Accuracy=0.6072, over 5006.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6337, over 6006.94 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,609 INFO [trainer.py:765] (2/8) Epoch 22, batch 1400, train_loss[loss=3.551, NarTop10Accuracy=0.6069, over 6140.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6337, over 6039.54 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,584 INFO [trainer.py:765] (2/8) Epoch 22, batch 1500, train_loss[loss=3.611, NarTop10Accuracy=0.5952, over 6054.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6332, over 5955.56 frames. ], batch size: 49, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (2/8) Epoch 22, batch 1600, train_loss[loss=3.324, NarTop10Accuracy=0.6498, over 7187.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6327, over 5932.79 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,463 INFO [trainer.py:765] (2/8) Epoch 22, batch 1700, train_loss[loss=3.414, NarTop10Accuracy=0.6333, over 6289.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6295, over 5925.51 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,725 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (2/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,218 INFO [trainer.py:765] (2/8) Epoch 22, batch 1800, train_loss[loss=3.372, NarTop10Accuracy=0.6474, over 7180.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6323, over 5996.77 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,751 INFO [trainer.py:765] (2/8) Epoch 22, batch 1900, train_loss[loss=3.604, NarTop10Accuracy=0.6056, over 6849.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.63, over 6032.99 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (2/8) Epoch 22, batch 2000, train_loss[loss=3.816, NarTop10Accuracy=0.5519, over 6111.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6311, over 5992.35 frames. 
], batch size: 49, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (2/8) Epoch 22, batch 2100, train_loss[loss=3.421, NarTop10Accuracy=0.6474, over 4764.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6315, over 5984.31 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (2/8) Epoch 22, batch 2200, train_loss[loss=3.777, NarTop10Accuracy=0.5585, over 7101.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6303, over 6035.31 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (2/8) Epoch 22, batch 2300, train_loss[loss=3.378, NarTop10Accuracy=0.6461, over 5739.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6278, over 6053.72 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (2/8) Epoch 22, batch 2400, train_loss[loss=3.18, NarTop10Accuracy=0.6847, over 5207.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6272, over 5889.00 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (2/8) Epoch 22, batch 2500, train_loss[loss=2.987, NarTop10Accuracy=0.722, over 4240.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6331, over 5542.25 frames. ], batch size: 5, lr: 3.85e-03 +2024-08-06 11:46:21,573 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (2/8) Epoch 23, batch 100, train_loss[loss=3.137, NarTop10Accuracy=0.6971, over 7355.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6484, over 2356.47 frames. ], batch size: 30, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (2/8) Epoch 23, batch 200, train_loss[loss=3.55, NarTop10Accuracy=0.6085, over 6955.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6486, over 3860.50 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,921 INFO [trainer.py:765] (2/8) Epoch 23, batch 300, train_loss[loss=3.325, NarTop10Accuracy=0.6512, over 7403.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6439, over 4658.84 frames. ], batch size: 23, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (2/8) Epoch 23, batch 400, train_loss[loss=3.274, NarTop10Accuracy=0.6633, over 5092.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 5122.58 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,618 INFO [trainer.py:765] (2/8) Epoch 23, batch 500, train_loss[loss=3.429, NarTop10Accuracy=0.6299, over 6289.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6435, over 5416.34 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (2/8) Epoch 23, batch 600, train_loss[loss=3.435, NarTop10Accuracy=0.6258, over 5785.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6433, over 5691.14 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,600 INFO [trainer.py:765] (2/8) Epoch 23, batch 700, train_loss[loss=3.446, NarTop10Accuracy=0.6306, over 5032.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6425, over 5767.38 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (2/8) Epoch 23, batch 800, train_loss[loss=3.394, NarTop10Accuracy=0.6554, over 4850.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.641, over 5812.53 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,396 INFO [trainer.py:765] (2/8) Epoch 23, batch 900, train_loss[loss=3.326, NarTop10Accuracy=0.6524, over 6311.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6419, over 5834.39 frames. 
], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (2/8) Epoch 23, batch 1000, train_loss[loss=3.309, NarTop10Accuracy=0.6556, over 6724.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6407, over 5924.65 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (2/8) Epoch 23, batch 1100, train_loss[loss=3.508, NarTop10Accuracy=0.6232, over 6852.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6366, over 5959.96 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (2/8) Epoch 23, batch 1200, train_loss[loss=3.436, NarTop10Accuracy=0.6345, over 7038.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.636, over 5956.45 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,824 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (2/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (2/8) Epoch 23, batch 1300, train_loss[loss=3.336, NarTop10Accuracy=0.6531, over 5071.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6361, over 6013.29 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (2/8) Epoch 23, batch 1400, train_loss[loss=3.584, NarTop10Accuracy=0.5982, over 6121.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6363, over 6031.60 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,397 INFO [trainer.py:765] (2/8) Epoch 23, batch 1500, train_loss[loss=3.557, NarTop10Accuracy=0.6097, over 6240.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6345, over 5964.33 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (2/8) Epoch 23, batch 1600, train_loss[loss=3.379, NarTop10Accuracy=0.6387, over 7455.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6332, over 5951.72 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,201 INFO [trainer.py:765] (2/8) Epoch 23, batch 1700, train_loss[loss=3.601, NarTop10Accuracy=0.597, over 6339.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6314, over 5935.28 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,968 INFO [trainer.py:765] (2/8) Epoch 23, batch 1800, train_loss[loss=3.139, NarTop10Accuracy=0.6938, over 7254.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6334, over 5998.14 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,596 INFO [trainer.py:765] (2/8) Epoch 23, batch 1900, train_loss[loss=3.517, NarTop10Accuracy=0.6179, over 6186.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6316, over 6027.96 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 11:57:49,250 INFO [trainer.py:765] (2/8) Epoch 23, batch 2000, train_loss[loss=3.727, NarTop10Accuracy=0.5807, over 6058.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6316, over 6008.28 frames. ], batch size: 48, lr: 3.69e-03 +2024-08-06 11:58:14,769 INFO [trainer.py:765] (2/8) Epoch 23, batch 2100, train_loss[loss=3.333, NarTop10Accuracy=0.6532, over 3902.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6315, over 5994.90 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (2/8) Epoch 23, batch 2200, train_loss[loss=3.586, NarTop10Accuracy=0.592, over 7188.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6345, over 6043.58 frames. ], batch size: 30, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (2/8) Epoch 23, batch 2300, train_loss[loss=3.379, NarTop10Accuracy=0.6395, over 5775.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6302, over 6068.23 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (2/8) Epoch 23, batch 2400, train_loss[loss=3.221, NarTop10Accuracy=0.6695, over 5157.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6283, over 5855.69 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,010 INFO [trainer.py:765] (2/8) Epoch 23, batch 2500, train_loss[loss=3.142, NarTop10Accuracy=0.7079, over 4976.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6343, over 5538.45 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:18,098 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:01:22,109 INFO [trainer.py:765] (2/8) Epoch 24, batch 100, train_loss[loss=3.632, NarTop10Accuracy=0.5909, over 7248.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6397, over 2382.23 frames. ], batch size: 30, lr: 3.59e-03 +2024-08-06 12:01:51,340 INFO [trainer.py:765] (2/8) Epoch 24, batch 200, train_loss[loss=3.575, NarTop10Accuracy=0.6067, over 6936.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6451, over 3865.16 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,511 INFO [trainer.py:765] (2/8) Epoch 24, batch 300, train_loss[loss=3.262, NarTop10Accuracy=0.6596, over 7040.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6455, over 4685.35 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,845 INFO [trainer.py:765] (2/8) Epoch 24, batch 400, train_loss[loss=3.088, NarTop10Accuracy=0.6915, over 5206.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6444, over 5135.92 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,255 INFO [trainer.py:765] (2/8) Epoch 24, batch 500, train_loss[loss=3.081, NarTop10Accuracy=0.7051, over 6243.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6463, over 5404.42 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,171 INFO [trainer.py:765] (2/8) Epoch 24, batch 600, train_loss[loss=3.554, NarTop10Accuracy=0.6102, over 5694.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6458, over 5670.27 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,529 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (2/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 12:04:23,310 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,732 INFO [trainer.py:765] (2/8) Epoch 24, batch 700, train_loss[loss=3.016, NarTop10Accuracy=0.7017, over 5094.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6446, over 5747.82 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,274 INFO [trainer.py:765] (2/8) Epoch 24, batch 800, train_loss[loss=3.555, NarTop10Accuracy=0.6057, over 5017.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5799.17 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,753 INFO [trainer.py:765] (2/8) Epoch 24, batch 900, train_loss[loss=3.513, NarTop10Accuracy=0.5975, over 6190.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6422, over 5828.19 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,811 INFO [trainer.py:765] (2/8) Epoch 24, batch 1000, train_loss[loss=3.418, NarTop10Accuracy=0.643, over 6763.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6398, over 5936.54 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (2/8) Epoch 24, batch 1100, train_loss[loss=3.328, NarTop10Accuracy=0.652, over 6879.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6383, over 5964.08 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,134 INFO [trainer.py:765] (2/8) Epoch 24, batch 1200, train_loss[loss=3.37, NarTop10Accuracy=0.6356, over 7233.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6358, over 5959.03 frames. ], batch size: 31, lr: 3.56e-03 +2024-08-06 12:08:20,730 INFO [trainer.py:765] (2/8) Epoch 24, batch 1300, train_loss[loss=3.053, NarTop10Accuracy=0.7001, over 5015.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6371, over 6028.32 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,065 INFO [trainer.py:765] (2/8) Epoch 24, batch 1400, train_loss[loss=3.389, NarTop10Accuracy=0.6414, over 6181.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6338, over 6050.04 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,337 INFO [trainer.py:765] (2/8) Epoch 24, batch 1500, train_loss[loss=3.758, NarTop10Accuracy=0.5681, over 5268.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.633, over 5976.29 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,524 INFO [trainer.py:765] (2/8) Epoch 24, batch 1600, train_loss[loss=3.408, NarTop10Accuracy=0.6299, over 7400.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6344, over 5950.14 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,545 INFO [trainer.py:765] (2/8) Epoch 24, batch 1700, train_loss[loss=3.592, NarTop10Accuracy=0.5968, over 6340.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6337, over 5926.42 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,272 INFO [trainer.py:765] (2/8) Epoch 24, batch 1800, train_loss[loss=3.182, NarTop10Accuracy=0.6701, over 7117.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6335, over 6004.23 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,846 INFO [trainer.py:765] (2/8) Epoch 24, batch 1900, train_loss[loss=3.504, NarTop10Accuracy=0.6213, over 6336.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6337, over 6053.74 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:11:41,666 INFO [trainer.py:765] (2/8) Epoch 24, batch 2000, train_loss[loss=3.423, NarTop10Accuracy=0.6397, over 6070.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6341, over 6021.64 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:12:07,103 INFO [trainer.py:765] (2/8) Epoch 24, batch 2100, train_loss[loss=3.03, NarTop10Accuracy=0.7142, over 3913.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6359, over 5988.36 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 12:12:33,372 INFO [trainer.py:765] (2/8) Epoch 24, batch 2200, train_loss[loss=3.465, NarTop10Accuracy=0.6255, over 7350.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6365, over 6030.85 frames. 
], batch size: 31, lr: 3.53e-03 +2024-08-06 12:12:58,771 INFO [trainer.py:765] (2/8) Epoch 24, batch 2300, train_loss[loss=3.63, NarTop10Accuracy=0.6004, over 5656.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6334, over 6055.78 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,486 INFO [trainer.py:765] (2/8) Epoch 24, batch 2400, train_loss[loss=3.124, NarTop10Accuracy=0.7047, over 5303.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.632, over 5856.84 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 12:13:47,005 INFO [trainer.py:765] (2/8) Epoch 24, batch 2500, train_loss[loss=2.946, NarTop10Accuracy=0.7273, over 5036.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6396, over 5523.17 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:07,904 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:14:50,195 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (2/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 27701MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (2/8) Epoch 25, batch 100, train_loss[loss=3.255, NarTop10Accuracy=0.6784, over 7048.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.646, over 2381.93 frames. ], batch size: 30, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (2/8) Epoch 25, batch 200, train_loss[loss=3.201, NarTop10Accuracy=0.6702, over 6907.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6461, over 3878.83 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (2/8) Epoch 25, batch 300, train_loss[loss=3.389, NarTop10Accuracy=0.6481, over 7223.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6465, over 4691.07 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,162 INFO [trainer.py:765] (2/8) Epoch 25, batch 400, train_loss[loss=3.59, NarTop10Accuracy=0.5869, over 5172.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6449, over 5135.39 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (2/8) Epoch 25, batch 500, train_loss[loss=3.116, NarTop10Accuracy=0.7119, over 6141.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6473, over 5426.47 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,180 INFO [trainer.py:765] (2/8) Epoch 25, batch 600, train_loss[loss=3.317, NarTop10Accuracy=0.667, over 5968.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.646, over 5680.29 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,597 INFO [trainer.py:765] (2/8) Epoch 25, batch 700, train_loss[loss=3.18, NarTop10Accuracy=0.6983, over 5162.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6434, over 5749.19 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,014 INFO [trainer.py:765] (2/8) Epoch 25, batch 800, train_loss[loss=3.069, NarTop10Accuracy=0.6907, over 5185.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6417, over 5803.23 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (2/8) Epoch 25, batch 900, train_loss[loss=3.131, NarTop10Accuracy=0.6908, over 6657.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6404, over 5821.41 frames. 
], batch size: 14, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (2/8) Epoch 25, batch 1000, train_loss[loss=3.511, NarTop10Accuracy=0.6328, over 6777.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6401, over 5923.16 frames. ], batch size: 14, lr: 3.42e-03 +2024-08-06 12:21:01,914 INFO [trainer.py:765] (2/8) Epoch 25, batch 1100, train_loss[loss=3.343, NarTop10Accuracy=0.6537, over 6919.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.638, over 5960.48 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,637 INFO [trainer.py:765] (2/8) Epoch 25, batch 1200, train_loss[loss=3.579, NarTop10Accuracy=0.6037, over 7110.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6394, over 5936.70 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (2/8) Epoch 25, batch 1300, train_loss[loss=3.35, NarTop10Accuracy=0.6444, over 5056.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6401, over 6016.27 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (2/8) Epoch 25, batch 1400, train_loss[loss=3.348, NarTop10Accuracy=0.6404, over 5943.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6395, over 6027.93 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (2/8) Epoch 25, batch 1500, train_loss[loss=3.899, NarTop10Accuracy=0.5363, over 5855.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6389, over 5969.23 frames. ], batch size: 49, lr: 3.41e-03 +2024-08-06 12:23:49,716 INFO [trainer.py:765] (2/8) Epoch 25, batch 1600, train_loss[loss=3.319, NarTop10Accuracy=0.6606, over 7230.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6383, over 5946.66 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (2/8) Epoch 25, batch 1700, train_loss[loss=3.632, NarTop10Accuracy=0.593, over 6224.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6357, over 5929.91 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (2/8) Epoch 25, batch 1800, train_loss[loss=3.6, NarTop10Accuracy=0.5962, over 7113.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6361, over 6000.83 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,775 INFO [trainer.py:765] (2/8) Epoch 25, batch 1900, train_loss[loss=3.502, NarTop10Accuracy=0.6175, over 5955.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6332, over 6045.45 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:35,709 INFO [trainer.py:765] (2/8) Epoch 25, batch 2000, train_loss[loss=3.569, NarTop10Accuracy=0.6057, over 6553.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6357, over 6024.10 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (2/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29652MB +2024-08-06 12:25:59,343 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,225 INFO [trainer.py:765] (2/8) Epoch 25, batch 2100, train_loss[loss=3.13, NarTop10Accuracy=0.6654, over 4906.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6347, over 5996.51 frames. 
], batch size: 5, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (2/8) Epoch 25, batch 2200, train_loss[loss=3.524, NarTop10Accuracy=0.6101, over 7162.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6356, over 6035.92 frames. ], batch size: 30, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (2/8) Epoch 25, batch 2300, train_loss[loss=3.689, NarTop10Accuracy=0.589, over 5772.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6339, over 6065.88 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,151 INFO [trainer.py:765] (2/8) Epoch 25, batch 2400, train_loss[loss=3.352, NarTop10Accuracy=0.6495, over 4996.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.633, over 5865.43 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (2/8) Epoch 25, batch 2500, train_loss[loss=3.546, NarTop10Accuracy=0.5989, over 5075.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6364, over 5526.99 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:13,095 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (2/8) Epoch 26, batch 100, train_loss[loss=3.619, NarTop10Accuracy=0.591, over 7219.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.649, over 2362.87 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (2/8) Epoch 26, batch 200, train_loss[loss=3.339, NarTop10Accuracy=0.6536, over 6863.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6484, over 3867.77 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (2/8) Epoch 26, batch 300, train_loss[loss=3.227, NarTop10Accuracy=0.6712, over 7199.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6472, over 4671.80 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,509 INFO [trainer.py:765] (2/8) Epoch 26, batch 400, train_loss[loss=3.245, NarTop10Accuracy=0.6611, over 5139.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6471, over 5131.68 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,530 INFO [trainer.py:765] (2/8) Epoch 26, batch 500, train_loss[loss=3.357, NarTop10Accuracy=0.6512, over 6114.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6494, over 5411.75 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (2/8) Epoch 26, batch 600, train_loss[loss=3.389, NarTop10Accuracy=0.6358, over 5855.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.646, over 5671.38 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (2/8) Epoch 26, batch 700, train_loss[loss=3.307, NarTop10Accuracy=0.666, over 4961.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6453, over 5729.10 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,808 INFO [trainer.py:765] (2/8) Epoch 26, batch 800, train_loss[loss=3.466, NarTop10Accuracy=0.6131, over 5020.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 5782.53 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (2/8) Epoch 26, batch 900, train_loss[loss=3.716, NarTop10Accuracy=0.5636, over 6141.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6442, over 5804.55 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (2/8) Epoch 26, batch 1000, train_loss[loss=3.068, NarTop10Accuracy=0.7, over 6663.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5899.18 frames. 
], batch size: 14, lr: 3.29e-03 +2024-08-06 12:34:57,797 INFO [trainer.py:765] (2/8) Epoch 26, batch 1100, train_loss[loss=3.288, NarTop10Accuracy=0.661, over 6823.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6416, over 5940.24 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (2/8) Epoch 26, batch 1200, train_loss[loss=3.167, NarTop10Accuracy=0.6909, over 7267.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5945.47 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,657 INFO [trainer.py:765] (2/8) Epoch 26, batch 1300, train_loss[loss=3.237, NarTop10Accuracy=0.6792, over 4891.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6403, over 6011.40 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (2/8) Epoch 26, batch 1400, train_loss[loss=3.331, NarTop10Accuracy=0.6363, over 6209.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6392, over 6028.33 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,592 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (2/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29729MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (2/8) Epoch 26, batch 1500, train_loss[loss=3.719, NarTop10Accuracy=0.5764, over 6311.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6396, over 5970.74 frames. ], batch size: 50, lr: 3.28e-03 +2024-08-06 12:37:51,061 INFO [trainer.py:765] (2/8) Epoch 26, batch 1600, train_loss[loss=3.495, NarTop10Accuracy=0.6215, over 7093.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6406, over 5950.53 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (2/8) Epoch 26, batch 1700, train_loss[loss=3.533, NarTop10Accuracy=0.609, over 6458.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6376, over 5939.02 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (2/8) Epoch 26, batch 1800, train_loss[loss=3.316, NarTop10Accuracy=0.6514, over 7297.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5988.71 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (2/8) Epoch 26, batch 1900, train_loss[loss=3.727, NarTop10Accuracy=0.5794, over 5851.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6357, over 6035.74 frames. ], batch size: 49, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (2/8) Epoch 26, batch 2000, train_loss[loss=3.417, NarTop10Accuracy=0.6361, over 5561.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6339, over 6014.14 frames. ], batch size: 49, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (2/8) Epoch 26, batch 2100, train_loss[loss=3.314, NarTop10Accuracy=0.6452, over 3916.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6361, over 6000.14 frames. ], batch size: 4, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (2/8) Epoch 26, batch 2200, train_loss[loss=3.357, NarTop10Accuracy=0.6397, over 7296.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6346, over 6024.20 frames. 
], batch size: 31, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (2/8) Epoch 26, batch 2300, train_loss[loss=3.126, NarTop10Accuracy=0.6906, over 5758.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6352, over 6060.06 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (2/8) Epoch 26, batch 2400, train_loss[loss=3.308, NarTop10Accuracy=0.6438, over 5165.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6344, over 5893.78 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,478 INFO [trainer.py:765] (2/8) Epoch 26, batch 2500, train_loss[loss=3.222, NarTop10Accuracy=0.6781, over 5044.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.64, over 5546.66 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,516 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:43:12,533 INFO [trainer.py:765] (2/8) Epoch 27, batch 100, train_loss[loss=3.738, NarTop10Accuracy=0.5686, over 7409.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6481, over 2378.95 frames. ], batch size: 31, lr: 3.19e-03 +2024-08-06 12:43:43,575 INFO [trainer.py:765] (2/8) Epoch 27, batch 200, train_loss[loss=3.36, NarTop10Accuracy=0.6377, over 6901.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6512, over 3874.81 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (2/8) Epoch 27, batch 300, train_loss[loss=3.248, NarTop10Accuracy=0.6662, over 7101.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.652, over 4674.14 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,460 INFO [trainer.py:765] (2/8) Epoch 27, batch 400, train_loss[loss=3.183, NarTop10Accuracy=0.6879, over 5076.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6514, over 5124.33 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,669 INFO [trainer.py:765] (2/8) Epoch 27, batch 500, train_loss[loss=2.999, NarTop10Accuracy=0.6979, over 6259.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6521, over 5401.99 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,260 INFO [trainer.py:765] (2/8) Epoch 27, batch 600, train_loss[loss=3.418, NarTop10Accuracy=0.6322, over 5757.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6486, over 5680.00 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (2/8) Epoch 27, batch 700, train_loss[loss=3.535, NarTop10Accuracy=0.6142, over 5142.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5734.61 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,016 INFO [trainer.py:765] (2/8) Epoch 27, batch 800, train_loss[loss=3.016, NarTop10Accuracy=0.7173, over 5033.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6467, over 5791.15 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,741 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (2/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29729MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,258 INFO [trainer.py:765] (2/8) Epoch 27, batch 900, train_loss[loss=3.345, NarTop10Accuracy=0.6454, over 6193.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6447, over 5818.89 frames. 
], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,862 INFO [trainer.py:765] (2/8) Epoch 27, batch 1000, train_loss[loss=3.518, NarTop10Accuracy=0.6161, over 6300.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6452, over 5927.65 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,084 INFO [trainer.py:765] (2/8) Epoch 27, batch 1100, train_loss[loss=3.491, NarTop10Accuracy=0.6146, over 6482.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5963.02 frames. ], batch size: 16, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (2/8) Epoch 27, batch 1200, train_loss[loss=3.307, NarTop10Accuracy=0.6542, over 7253.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5960.87 frames. ], batch size: 30, lr: 3.16e-03 +2024-08-06 12:50:06,241 INFO [trainer.py:765] (2/8) Epoch 27, batch 1300, train_loss[loss=3.158, NarTop10Accuracy=0.6828, over 4977.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6417, over 6023.15 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,950 INFO [trainer.py:765] (2/8) Epoch 27, batch 1400, train_loss[loss=3.238, NarTop10Accuracy=0.6682, over 6214.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 6039.84 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,277 INFO [trainer.py:765] (2/8) Epoch 27, batch 1500, train_loss[loss=3.362, NarTop10Accuracy=0.6477, over 6106.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6402, over 5972.44 frames. ], batch size: 50, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (2/8) Epoch 27, batch 1600, train_loss[loss=3.317, NarTop10Accuracy=0.6602, over 7156.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6401, over 5942.73 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,061 INFO [trainer.py:765] (2/8) Epoch 27, batch 1700, train_loss[loss=3.622, NarTop10Accuracy=0.6002, over 6274.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6393, over 5939.03 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,668 INFO [trainer.py:765] (2/8) Epoch 27, batch 1800, train_loss[loss=3.487, NarTop10Accuracy=0.6267, over 7284.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6408, over 6001.67 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,290 INFO [trainer.py:765] (2/8) Epoch 27, batch 1900, train_loss[loss=3.641, NarTop10Accuracy=0.5913, over 5783.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.638, over 6039.70 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:27,998 INFO [trainer.py:765] (2/8) Epoch 27, batch 2000, train_loss[loss=3.487, NarTop10Accuracy=0.6201, over 6607.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6395, over 6007.89 frames. ], batch size: 51, lr: 3.14e-03 +2024-08-06 12:53:53,538 INFO [trainer.py:765] (2/8) Epoch 27, batch 2100, train_loss[loss=3.656, NarTop10Accuracy=0.5865, over 4760.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6394, over 6001.67 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (2/8) Epoch 27, batch 2200, train_loss[loss=3.276, NarTop10Accuracy=0.672, over 7125.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6405, over 6043.56 frames. ], batch size: 30, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (2/8) Epoch 27, batch 2300, train_loss[loss=3.242, NarTop10Accuracy=0.6789, over 5691.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6367, over 6069.53 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (2/8) Epoch 27, batch 2400, train_loss[loss=3.221, NarTop10Accuracy=0.6731, over 5001.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6343, over 5874.02 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,727 INFO [trainer.py:765] (2/8) Epoch 27, batch 2500, train_loss[loss=3.116, NarTop10Accuracy=0.6935, over 5053.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6408, over 5523.90 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:53,883 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (2/8) Epoch 28, batch 100, train_loss[loss=3.19, NarTop10Accuracy=0.6785, over 7139.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6552, over 2376.14 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,204 INFO [trainer.py:765] (2/8) Epoch 28, batch 200, train_loss[loss=3.39, NarTop10Accuracy=0.6485, over 7081.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.651, over 3871.63 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (2/8) Epoch 28, batch 300, train_loss[loss=3.376, NarTop10Accuracy=0.6433, over 7311.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6487, over 4685.24 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,457 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (2/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29729MB +2024-08-06 12:58:07,333 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (2/8) Epoch 28, batch 400, train_loss[loss=3.343, NarTop10Accuracy=0.6448, over 5072.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6476, over 5137.73 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,437 INFO [trainer.py:765] (2/8) Epoch 28, batch 500, train_loss[loss=3.139, NarTop10Accuracy=0.7057, over 6136.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6504, over 5407.55 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (2/8) Epoch 28, batch 600, train_loss[loss=3.333, NarTop10Accuracy=0.6625, over 5840.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6504, over 5677.73 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (2/8) Epoch 28, batch 700, train_loss[loss=3.188, NarTop10Accuracy=0.673, over 4998.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6497, over 5736.39 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,433 INFO [trainer.py:765] (2/8) Epoch 28, batch 800, train_loss[loss=3.273, NarTop10Accuracy=0.6637, over 4975.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6476, over 5777.87 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (2/8) Epoch 28, batch 900, train_loss[loss=3.243, NarTop10Accuracy=0.6629, over 6633.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6468, over 5806.78 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 13:02:06,494 INFO [trainer.py:765] (2/8) Epoch 28, batch 1000, train_loss[loss=3.695, NarTop10Accuracy=0.5796, over 6531.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6425, over 5913.13 frames. 
], batch size: 14, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (2/8) Epoch 28, batch 1100, train_loss[loss=3.296, NarTop10Accuracy=0.6521, over 6901.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6431, over 5941.47 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (2/8) Epoch 28, batch 1200, train_loss[loss=3.456, NarTop10Accuracy=0.6121, over 7302.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6428, over 5950.03 frames. ], batch size: 30, lr: 3.05e-03 +2024-08-06 13:03:54,154 INFO [trainer.py:765] (2/8) Epoch 28, batch 1300, train_loss[loss=3.18, NarTop10Accuracy=0.6889, over 4957.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.643, over 6011.68 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (2/8) Epoch 28, batch 1400, train_loss[loss=3.467, NarTop10Accuracy=0.6203, over 6162.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6398, over 6047.02 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,349 INFO [trainer.py:765] (2/8) Epoch 28, batch 1500, train_loss[loss=3.528, NarTop10Accuracy=0.6141, over 6085.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.642, over 5977.31 frames. ], batch size: 49, lr: 3.04e-03 +2024-08-06 13:05:30,371 INFO [trainer.py:765] (2/8) Epoch 28, batch 1600, train_loss[loss=3.686, NarTop10Accuracy=0.5761, over 7283.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6397, over 5938.29 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (2/8) Epoch 28, batch 1700, train_loss[loss=3.766, NarTop10Accuracy=0.5635, over 6208.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.641, over 5926.14 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (2/8) Epoch 28, batch 1800, train_loss[loss=3.77, NarTop10Accuracy=0.5682, over 7202.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5994.18 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (2/8) Epoch 28, batch 1900, train_loss[loss=3.43, NarTop10Accuracy=0.6346, over 6320.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6415, over 6048.74 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (2/8) Epoch 28, batch 2000, train_loss[loss=3.46, NarTop10Accuracy=0.6266, over 5843.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6428, over 6010.19 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 13:07:41,547 INFO [trainer.py:765] (2/8) Epoch 28, batch 2100, train_loss[loss=3.655, NarTop10Accuracy=0.5906, over 3865.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6407, over 5983.98 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (2/8) Epoch 28, batch 2200, train_loss[loss=3.506, NarTop10Accuracy=0.6092, over 7147.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6382, over 6032.41 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (2/8) Epoch 28, batch 2300, train_loss[loss=3.411, NarTop10Accuracy=0.6326, over 5784.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6355, over 6061.50 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,135 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (2/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,390 INFO [trainer.py:765] (2/8) Epoch 28, batch 2400, train_loss[loss=3.42, NarTop10Accuracy=0.6283, over 5135.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6351, over 5874.74 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (2/8) Epoch 28, batch 2500, train_loss[loss=3.463, NarTop10Accuracy=0.6253, over 4937.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.642, over 5524.14 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:52,199 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:10:48,193 INFO [trainer.py:765] (2/8) Epoch 29, batch 100, train_loss[loss=3.62, NarTop10Accuracy=0.594, over 7226.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6582, over 2374.47 frames. ], batch size: 30, lr: 2.96e-03 +2024-08-06 13:11:20,841 INFO [trainer.py:765] (2/8) Epoch 29, batch 200, train_loss[loss=3.387, NarTop10Accuracy=0.6456, over 6756.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.654, over 3871.14 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,950 INFO [trainer.py:765] (2/8) Epoch 29, batch 300, train_loss[loss=3.122, NarTop10Accuracy=0.699, over 7281.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6538, over 4665.16 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,717 INFO [trainer.py:765] (2/8) Epoch 29, batch 400, train_loss[loss=3.277, NarTop10Accuracy=0.6617, over 5240.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6527, over 5112.80 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,921 INFO [trainer.py:765] (2/8) Epoch 29, batch 500, train_loss[loss=3.444, NarTop10Accuracy=0.624, over 6172.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6518, over 5408.55 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,547 INFO [trainer.py:765] (2/8) Epoch 29, batch 600, train_loss[loss=3.654, NarTop10Accuracy=0.5917, over 5819.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 5683.38 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,937 INFO [trainer.py:765] (2/8) Epoch 29, batch 700, train_loss[loss=3.566, NarTop10Accuracy=0.5999, over 5169.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 5765.77 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,677 INFO [trainer.py:765] (2/8) Epoch 29, batch 800, train_loss[loss=3.27, NarTop10Accuracy=0.6593, over 5063.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6433, over 5806.63 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,114 INFO [trainer.py:765] (2/8) Epoch 29, batch 900, train_loss[loss=3.263, NarTop10Accuracy=0.6664, over 6281.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6456, over 5824.78 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (2/8) Epoch 29, batch 1000, train_loss[loss=3.764, NarTop10Accuracy=0.5642, over 6248.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6458, over 5910.14 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,713 INFO [trainer.py:765] (2/8) Epoch 29, batch 1100, train_loss[loss=3.543, NarTop10Accuracy=0.6072, over 6777.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6413, over 5928.71 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,933 INFO [trainer.py:765] (2/8) Epoch 29, batch 1200, train_loss[loss=3.524, NarTop10Accuracy=0.6139, over 7230.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6425, over 5927.99 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 13:17:43,956 INFO [trainer.py:765] (2/8) Epoch 29, batch 1300, train_loss[loss=3.36, NarTop10Accuracy=0.6471, over 5105.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6418, over 5999.28 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,924 INFO [trainer.py:765] (2/8) Epoch 29, batch 1400, train_loss[loss=3.773, NarTop10Accuracy=0.5607, over 6008.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6415, over 6021.86 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (2/8) Epoch 29, batch 1500, train_loss[loss=3.66, NarTop10Accuracy=0.5834, over 6262.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6415, over 5953.98 frames. ], batch size: 48, lr: 2.93e-03 +2024-08-06 13:19:16,409 INFO [trainer.py:765] (2/8) Epoch 29, batch 1600, train_loss[loss=3.278, NarTop10Accuracy=0.6661, over 7195.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5955.01 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,242 INFO [trainer.py:765] (2/8) Epoch 29, batch 1700, train_loss[loss=3.162, NarTop10Accuracy=0.6794, over 6269.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6402, over 5931.65 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,091 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (2/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,108 INFO [trainer.py:765] (2/8) Epoch 29, batch 1800, train_loss[loss=3.378, NarTop10Accuracy=0.6353, over 7138.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.641, over 6011.62 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,846 INFO [trainer.py:765] (2/8) Epoch 29, batch 1900, train_loss[loss=3.495, NarTop10Accuracy=0.6164, over 6086.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6377, over 6041.36 frames. ], batch size: 50, lr: 2.93e-03 +2024-08-06 13:21:12,479 INFO [trainer.py:765] (2/8) Epoch 29, batch 2000, train_loss[loss=3.72, NarTop10Accuracy=0.5784, over 6574.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6392, over 6012.42 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (2/8) Epoch 29, batch 2100, train_loss[loss=3.442, NarTop10Accuracy=0.6262, over 4814.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6396, over 5998.35 frames. ], batch size: 5, lr: 2.92e-03 +2024-08-06 13:22:03,360 INFO [trainer.py:765] (2/8) Epoch 29, batch 2200, train_loss[loss=3.266, NarTop10Accuracy=0.6683, over 6995.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6394, over 6034.62 frames. ], batch size: 30, lr: 2.92e-03 +2024-08-06 13:22:28,831 INFO [trainer.py:765] (2/8) Epoch 29, batch 2300, train_loss[loss=3.485, NarTop10Accuracy=0.6228, over 5707.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6363, over 6070.33 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,621 INFO [trainer.py:765] (2/8) Epoch 29, batch 2400, train_loss[loss=3.437, NarTop10Accuracy=0.637, over 5140.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6359, over 5876.57 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 13:23:16,979 INFO [trainer.py:765] (2/8) Epoch 29, batch 2500, train_loss[loss=3.254, NarTop10Accuracy=0.6704, over 5028.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5531.54 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,267 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (2/8) Epoch 30, batch 100, train_loss[loss=3.245, NarTop10Accuracy=0.6736, over 7057.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6648, over 2372.27 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (2/8) Epoch 30, batch 200, train_loss[loss=3.29, NarTop10Accuracy=0.669, over 6864.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6611, over 3856.36 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (2/8) Epoch 30, batch 300, train_loss[loss=3.233, NarTop10Accuracy=0.6738, over 7320.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6554, over 4652.94 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,538 INFO [trainer.py:765] (2/8) Epoch 30, batch 400, train_loss[loss=3.234, NarTop10Accuracy=0.6797, over 5187.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6525, over 5100.17 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (2/8) Epoch 30, batch 500, train_loss[loss=3.374, NarTop10Accuracy=0.6454, over 6229.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6577, over 5388.05 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (2/8) Epoch 30, batch 600, train_loss[loss=3.245, NarTop10Accuracy=0.6707, over 5910.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6566, over 5663.42 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (2/8) Epoch 30, batch 700, train_loss[loss=3.22, NarTop10Accuracy=0.6801, over 5132.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6548, over 5727.80 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (2/8) Epoch 30, batch 800, train_loss[loss=3.206, NarTop10Accuracy=0.6598, over 5030.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6493, over 5794.67 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,424 INFO [trainer.py:765] (2/8) Epoch 30, batch 900, train_loss[loss=3.454, NarTop10Accuracy=0.6287, over 6120.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6484, over 5812.51 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 13:29:45,914 INFO [trainer.py:765] (2/8) Epoch 30, batch 1000, train_loss[loss=3.403, NarTop10Accuracy=0.6255, over 6261.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6456, over 5919.55 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,172 INFO [trainer.py:765] (2/8) Epoch 30, batch 1100, train_loss[loss=3.362, NarTop10Accuracy=0.6355, over 6900.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6447, over 5955.13 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,002 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (2/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 13:30:48,917 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (2/8) Epoch 30, batch 1200, train_loss[loss=3.483, NarTop10Accuracy=0.6264, over 7022.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6428, over 5955.28 frames. ], batch size: 30, lr: 2.84e-03 +2024-08-06 13:31:43,021 INFO [trainer.py:765] (2/8) Epoch 30, batch 1300, train_loss[loss=3.111, NarTop10Accuracy=0.6894, over 5085.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6438, over 6021.03 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (2/8) Epoch 30, batch 1400, train_loss[loss=3.653, NarTop10Accuracy=0.578, over 6109.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6428, over 6032.76 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (2/8) Epoch 30, batch 1500, train_loss[loss=3.46, NarTop10Accuracy=0.6212, over 6091.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6428, over 5960.26 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (2/8) Epoch 30, batch 1600, train_loss[loss=3.606, NarTop10Accuracy=0.5969, over 7243.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6413, over 5945.74 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (2/8) Epoch 30, batch 1700, train_loss[loss=3.398, NarTop10Accuracy=0.6395, over 6747.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5932.01 frames. ], batch size: 14, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (2/8) Epoch 30, batch 1800, train_loss[loss=3.668, NarTop10Accuracy=0.576, over 7102.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6416, over 6003.12 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (2/8) Epoch 30, batch 1900, train_loss[loss=3.627, NarTop10Accuracy=0.5893, over 6491.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 6031.07 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (2/8) Epoch 30, batch 2000, train_loss[loss=3.734, NarTop10Accuracy=0.5734, over 5924.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 6016.85 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (2/8) Epoch 30, batch 2100, train_loss[loss=3.577, NarTop10Accuracy=0.6028, over 3871.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6414, over 5998.82 frames. ], batch size: 4, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (2/8) Epoch 30, batch 2200, train_loss[loss=3.407, NarTop10Accuracy=0.6307, over 7256.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6422, over 6034.10 frames. ], batch size: 30, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (2/8) Epoch 30, batch 2300, train_loss[loss=3.504, NarTop10Accuracy=0.6181, over 5820.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6399, over 6061.46 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (2/8) Epoch 30, batch 2400, train_loss[loss=3.435, NarTop10Accuracy=0.6323, over 5154.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6384, over 5882.56 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (2/8) Epoch 30, batch 2500, train_loss[loss=3.225, NarTop10Accuracy=0.6724, over 4992.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6447, over 5540.49 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:35,732 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (2/8) Epoch 31, batch 100, train_loss[loss=3.191, NarTop10Accuracy=0.6844, over 7425.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6621, over 2372.03 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (2/8) Epoch 31, batch 200, train_loss[loss=3.198, NarTop10Accuracy=0.68, over 6942.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6584, over 3879.03 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (2/8) Epoch 31, batch 300, train_loss[loss=3.427, NarTop10Accuracy=0.632, over 7244.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6589, over 4671.59 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (2/8) Epoch 31, batch 400, train_loss[loss=3.607, NarTop10Accuracy=0.6082, over 5130.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6544, over 5116.35 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (2/8) Epoch 31, batch 500, train_loss[loss=3.139, NarTop10Accuracy=0.6913, over 6138.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.655, over 5397.48 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,298 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (2/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,862 INFO [trainer.py:765] (2/8) Epoch 31, batch 600, train_loss[loss=3.269, NarTop10Accuracy=0.6619, over 5679.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6535, over 5683.43 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,259 INFO [trainer.py:765] (2/8) Epoch 31, batch 700, train_loss[loss=3.113, NarTop10Accuracy=0.6967, over 4218.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6532, over 5738.63 frames. ], batch size: 5, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (2/8) Epoch 31, batch 800, train_loss[loss=3.124, NarTop10Accuracy=0.6931, over 4973.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6505, over 5785.63 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (2/8) Epoch 31, batch 900, train_loss[loss=3.224, NarTop10Accuracy=0.6723, over 6679.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.651, over 5805.19 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (2/8) Epoch 31, batch 1000, train_loss[loss=3.232, NarTop10Accuracy=0.6748, over 6642.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6484, over 5915.48 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (2/8) Epoch 31, batch 1100, train_loss[loss=3.554, NarTop10Accuracy=0.6105, over 6971.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6472, over 5950.63 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (2/8) Epoch 31, batch 1200, train_loss[loss=3.372, NarTop10Accuracy=0.6368, over 7047.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6439, over 5948.26 frames. ], batch size: 30, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (2/8) Epoch 31, batch 1300, train_loss[loss=3.275, NarTop10Accuracy=0.6594, over 5117.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6459, over 6011.14 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,740 INFO [trainer.py:765] (2/8) Epoch 31, batch 1400, train_loss[loss=3.148, NarTop10Accuracy=0.6923, over 6189.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6447, over 6043.28 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (2/8) Epoch 31, batch 1500, train_loss[loss=3.542, NarTop10Accuracy=0.6125, over 6255.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6443, over 5990.62 frames. ], batch size: 48, lr: 2.74e-03 +2024-08-06 13:47:04,657 INFO [trainer.py:765] (2/8) Epoch 31, batch 1600, train_loss[loss=3.297, NarTop10Accuracy=0.6621, over 7195.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6445, over 5958.19 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,424 INFO [trainer.py:765] (2/8) Epoch 31, batch 1700, train_loss[loss=3.646, NarTop10Accuracy=0.5877, over 6093.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6437, over 5938.37 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (2/8) Epoch 31, batch 1800, train_loss[loss=3.565, NarTop10Accuracy=0.6062, over 6929.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6442, over 6000.00 frames. ], batch size: 21, lr: 2.74e-03 +2024-08-06 13:48:24,576 INFO [trainer.py:765] (2/8) Epoch 31, batch 1900, train_loss[loss=3.455, NarTop10Accuracy=0.6329, over 5895.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6427, over 6046.18 frames. ], batch size: 48, lr: 2.74e-03 +2024-08-06 13:48:50,257 INFO [trainer.py:765] (2/8) Epoch 31, batch 2000, train_loss[loss=3.684, NarTop10Accuracy=0.5836, over 6127.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.643, over 6032.45 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,764 INFO [trainer.py:765] (2/8) Epoch 31, batch 2100, train_loss[loss=3.546, NarTop10Accuracy=0.6041, over 4881.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6424, over 6023.91 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (2/8) Epoch 31, batch 2200, train_loss[loss=3.426, NarTop10Accuracy=0.634, over 7268.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 6039.07 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 13:50:06,708 INFO [trainer.py:765] (2/8) Epoch 31, batch 2300, train_loss[loss=3.282, NarTop10Accuracy=0.6748, over 5673.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6406, over 6066.19 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,393 INFO [trainer.py:765] (2/8) Epoch 31, batch 2400, train_loss[loss=3.229, NarTop10Accuracy=0.6757, over 5136.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6377, over 5889.10 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,892 INFO [trainer.py:765] (2/8) Epoch 31, batch 2500, train_loss[loss=2.984, NarTop10Accuracy=0.7232, over 4127.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6412, over 5544.63 frames. 
], batch size: 5, lr: 2.72e-03 +2024-08-06 13:51:08,995 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 13:51:19,069 INFO [trainer.py:811] (2/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,602 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 13:52:19,911 INFO [trainer.py:765] (2/8) Epoch 32, batch 100, train_loss[loss=3.371, NarTop10Accuracy=0.6521, over 7347.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6564, over 2362.92 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 13:52:52,538 INFO [trainer.py:765] (2/8) Epoch 32, batch 200, train_loss[loss=3.449, NarTop10Accuracy=0.619, over 6657.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6548, over 3863.53 frames. ], batch size: 16, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (2/8) Epoch 32, batch 300, train_loss[loss=3.175, NarTop10Accuracy=0.6901, over 6974.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6558, over 4668.57 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,887 INFO [trainer.py:765] (2/8) Epoch 32, batch 400, train_loss[loss=3.543, NarTop10Accuracy=0.5983, over 5202.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6543, over 5108.34 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,822 INFO [trainer.py:765] (2/8) Epoch 32, batch 500, train_loss[loss=3.016, NarTop10Accuracy=0.7131, over 6327.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6554, over 5388.37 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,772 INFO [trainer.py:765] (2/8) Epoch 32, batch 600, train_loss[loss=3.179, NarTop10Accuracy=0.6751, over 5923.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6549, over 5665.90 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,512 INFO [trainer.py:765] (2/8) Epoch 32, batch 700, train_loss[loss=3.062, NarTop10Accuracy=0.7119, over 4977.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6514, over 5740.65 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,173 INFO [trainer.py:765] (2/8) Epoch 32, batch 800, train_loss[loss=3.204, NarTop10Accuracy=0.6737, over 5165.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6513, over 5799.19 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,166 INFO [trainer.py:765] (2/8) Epoch 32, batch 900, train_loss[loss=3.483, NarTop10Accuracy=0.6241, over 6238.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6513, over 5835.76 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,521 INFO [trainer.py:765] (2/8) Epoch 32, batch 1000, train_loss[loss=3.71, NarTop10Accuracy=0.5721, over 6264.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6477, over 5926.00 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (2/8) Epoch 32, batch 1100, train_loss[loss=3.221, NarTop10Accuracy=0.6667, over 6728.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6457, over 5956.95 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,541 INFO [trainer.py:765] (2/8) Epoch 32, batch 1200, train_loss[loss=3.038, NarTop10Accuracy=0.7087, over 7170.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6466, over 5939.79 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,260 INFO [trainer.py:765] (2/8) Epoch 32, batch 1300, train_loss[loss=3.346, NarTop10Accuracy=0.6413, over 5100.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6448, over 6015.66 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,266 INFO [trainer.py:765] (2/8) Epoch 32, batch 1400, train_loss[loss=3.215, NarTop10Accuracy=0.67, over 6163.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6449, over 6038.75 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,976 INFO [trainer.py:765] (2/8) Epoch 32, batch 1500, train_loss[loss=3.745, NarTop10Accuracy=0.5609, over 6034.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6467, over 5988.56 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,824 INFO [trainer.py:765] (2/8) Epoch 32, batch 1600, train_loss[loss=3.202, NarTop10Accuracy=0.6749, over 7182.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6463, over 5947.89 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,533 INFO [trainer.py:765] (2/8) Epoch 32, batch 1700, train_loss[loss=3.244, NarTop10Accuracy=0.6671, over 6193.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6442, over 5939.11 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 14:01:34,089 INFO [trainer.py:765] (2/8) Epoch 32, batch 1800, train_loss[loss=3.286, NarTop10Accuracy=0.6688, over 7004.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6428, over 5993.47 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (2/8) Epoch 32, batch 1900, train_loss[loss=3.456, NarTop10Accuracy=0.6292, over 6087.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6411, over 6050.08 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:02:20,590 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (2/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,383 INFO [trainer.py:765] (2/8) Epoch 32, batch 2000, train_loss[loss=3.575, NarTop10Accuracy=0.6074, over 6250.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 6019.03 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:03:01,698 INFO [trainer.py:765] (2/8) Epoch 32, batch 2100, train_loss[loss=3.359, NarTop10Accuracy=0.6394, over 4847.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6403, over 6008.03 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 14:03:27,176 INFO [trainer.py:765] (2/8) Epoch 32, batch 2200, train_loss[loss=3.515, NarTop10Accuracy=0.6205, over 7456.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6405, over 6030.52 frames. ], batch size: 31, lr: 2.64e-03 +2024-08-06 14:03:52,586 INFO [trainer.py:765] (2/8) Epoch 32, batch 2300, train_loss[loss=3.674, NarTop10Accuracy=0.5842, over 5849.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6377, over 6060.40 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (2/8) Epoch 32, batch 2400, train_loss[loss=3.71, NarTop10Accuracy=0.5809, over 5171.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6376, over 5899.47 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (2/8) Epoch 32, batch 2500, train_loss[loss=3.188, NarTop10Accuracy=0.6858, over 5007.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6457, over 5554.50 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:01,834 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (2/8) Epoch 33, batch 100, train_loss[loss=3.366, NarTop10Accuracy=0.6458, over 7238.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6561, over 2367.50 frames. ], batch size: 30, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (2/8) Epoch 33, batch 200, train_loss[loss=3.23, NarTop10Accuracy=0.6657, over 6828.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6588, over 3860.83 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,146 INFO [trainer.py:765] (2/8) Epoch 33, batch 300, train_loss[loss=3.216, NarTop10Accuracy=0.6878, over 7314.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6581, over 4667.41 frames. ], batch size: 22, lr: 2.59e-03 +2024-08-06 14:07:48,255 INFO [trainer.py:765] (2/8) Epoch 33, batch 400, train_loss[loss=3.368, NarTop10Accuracy=0.6476, over 5207.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.656, over 5116.54 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (2/8) Epoch 33, batch 500, train_loss[loss=3.264, NarTop10Accuracy=0.6617, over 6081.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6551, over 5398.01 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (2/8) Epoch 33, batch 600, train_loss[loss=3, NarTop10Accuracy=0.7086, over 5752.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6527, over 5663.46 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,925 INFO [trainer.py:765] (2/8) Epoch 33, batch 700, train_loss[loss=3.277, NarTop10Accuracy=0.6585, over 5075.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6528, over 5731.82 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,596 INFO [trainer.py:765] (2/8) Epoch 33, batch 800, train_loss[loss=2.895, NarTop10Accuracy=0.7398, over 5304.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.65, over 5792.10 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,386 INFO [trainer.py:765] (2/8) Epoch 33, batch 900, train_loss[loss=3.427, NarTop10Accuracy=0.6412, over 6217.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6516, over 5818.13 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,068 INFO [trainer.py:765] (2/8) Epoch 33, batch 1000, train_loss[loss=3.311, NarTop10Accuracy=0.665, over 6620.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6508, over 5922.59 frames. ], batch size: 14, lr: 2.58e-03 +2024-08-06 14:11:47,301 INFO [trainer.py:765] (2/8) Epoch 33, batch 1100, train_loss[loss=3.638, NarTop10Accuracy=0.5929, over 6973.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 5955.37 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (2/8) Epoch 33, batch 1200, train_loss[loss=3.466, NarTop10Accuracy=0.6253, over 7589.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5953.05 frames. ], batch size: 32, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (2/8) Epoch 33, batch 1300, train_loss[loss=3.718, NarTop10Accuracy=0.5626, over 5032.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6498, over 6016.88 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (2/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:13:42,263 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,801 INFO [trainer.py:765] (2/8) Epoch 33, batch 1400, train_loss[loss=3.221, NarTop10Accuracy=0.6641, over 6146.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6506, over 6041.98 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,243 INFO [trainer.py:765] (2/8) Epoch 33, batch 1500, train_loss[loss=3.473, NarTop10Accuracy=0.621, over 6374.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6466, over 5972.91 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,190 INFO [trainer.py:765] (2/8) Epoch 33, batch 1600, train_loss[loss=3.112, NarTop10Accuracy=0.6802, over 7166.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6467, over 5964.50 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,855 INFO [trainer.py:765] (2/8) Epoch 33, batch 1700, train_loss[loss=3.283, NarTop10Accuracy=0.6568, over 6243.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.646, over 5951.02 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,588 INFO [trainer.py:765] (2/8) Epoch 33, batch 1800, train_loss[loss=3.375, NarTop10Accuracy=0.651, over 6862.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6474, over 6008.82 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,212 INFO [trainer.py:765] (2/8) Epoch 33, batch 1900, train_loss[loss=3.429, NarTop10Accuracy=0.6324, over 6340.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6454, over 6029.54 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 14:16:24,893 INFO [trainer.py:765] (2/8) Epoch 33, batch 2000, train_loss[loss=3.6, NarTop10Accuracy=0.5991, over 6002.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6468, over 6003.26 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:50,349 INFO [trainer.py:765] (2/8) Epoch 33, batch 2100, train_loss[loss=3.416, NarTop10Accuracy=0.6231, over 4047.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6475, over 5976.75 frames. ], batch size: 4, lr: 2.56e-03 +2024-08-06 14:17:15,824 INFO [trainer.py:765] (2/8) Epoch 33, batch 2200, train_loss[loss=3.629, NarTop10Accuracy=0.6016, over 7283.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6477, over 6010.82 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,307 INFO [trainer.py:765] (2/8) Epoch 33, batch 2300, train_loss[loss=3.213, NarTop10Accuracy=0.665, over 5840.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.644, over 6043.34 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,142 INFO [trainer.py:765] (2/8) Epoch 33, batch 2400, train_loss[loss=3.556, NarTop10Accuracy=0.6102, over 5078.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6414, over 5852.87 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,705 INFO [trainer.py:765] (2/8) Epoch 33, batch 2500, train_loss[loss=3.267, NarTop10Accuracy=0.6621, over 5304.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6477, over 5512.30 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,598 INFO [trainer.py:650] (2/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (2/8) Epoch 34, batch 100, train_loss[loss=3.276, NarTop10Accuracy=0.666, over 7146.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6589, over 2373.11 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (2/8) Epoch 34, batch 200, train_loss[loss=3.329, NarTop10Accuracy=0.6573, over 6840.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6606, over 3867.76 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,842 INFO [trainer.py:765] (2/8) Epoch 34, batch 300, train_loss[loss=3.226, NarTop10Accuracy=0.6779, over 7150.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6566, over 4678.25 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,449 INFO [trainer.py:765] (2/8) Epoch 34, batch 400, train_loss[loss=2.934, NarTop10Accuracy=0.7291, over 5037.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.658, over 5131.68 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (2/8) Epoch 34, batch 500, train_loss[loss=3.197, NarTop10Accuracy=0.6902, over 6253.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6545, over 5399.37 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,826 INFO [trainer.py:765] (2/8) Epoch 34, batch 600, train_loss[loss=3.345, NarTop10Accuracy=0.6535, over 5749.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6559, over 5657.19 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (2/8) Epoch 34, batch 700, train_loss[loss=3.238, NarTop10Accuracy=0.6637, over 5118.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6552, over 5723.91 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (2/8) Epoch 34, batch 800, train_loss[loss=3.25, NarTop10Accuracy=0.6662, over 5017.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6534, over 5768.34 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,719 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (2/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (2/8) Epoch 34, batch 900, train_loss[loss=3.174, NarTop10Accuracy=0.6723, over 6220.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6534, over 5794.53 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (2/8) Epoch 34, batch 1000, train_loss[loss=3.398, NarTop10Accuracy=0.6452, over 6588.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6513, over 5905.19 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (2/8) Epoch 34, batch 1100, train_loss[loss=3.521, NarTop10Accuracy=0.6166, over 6858.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.651, over 5943.49 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (2/8) Epoch 34, batch 1200, train_loss[loss=3.251, NarTop10Accuracy=0.6633, over 7115.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6529, over 5936.41 frames. 
], batch size: 30, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (2/8) Epoch 34, batch 1300, train_loss[loss=3.506, NarTop10Accuracy=0.6098, over 4884.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6515, over 6021.67 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (2/8) Epoch 34, batch 1400, train_loss[loss=3.033, NarTop10Accuracy=0.7037, over 6041.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6488, over 6031.68 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (2/8) Epoch 34, batch 1500, train_loss[loss=3.908, NarTop10Accuracy=0.5374, over 6069.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6507, over 5963.59 frames. ], batch size: 49, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (2/8) Epoch 34, batch 1600, train_loss[loss=3.367, NarTop10Accuracy=0.6541, over 7101.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6476, over 5961.17 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,384 INFO [trainer.py:765] (2/8) Epoch 34, batch 1700, train_loss[loss=3.467, NarTop10Accuracy=0.6214, over 6260.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6454, over 5947.29 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,009 INFO [trainer.py:765] (2/8) Epoch 34, batch 1800, train_loss[loss=3.771, NarTop10Accuracy=0.5672, over 7034.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 6013.19 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (2/8) Epoch 34, batch 1900, train_loss[loss=3.671, NarTop10Accuracy=0.5898, over 6093.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.644, over 6049.60 frames. ], batch size: 51, lr: 2.49e-03 +2024-08-06 14:30:09,515 INFO [trainer.py:765] (2/8) Epoch 34, batch 2000, train_loss[loss=3.549, NarTop10Accuracy=0.6124, over 6136.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 6020.47 frames. ], batch size: 55, lr: 2.49e-03 +2024-08-06 14:30:35,016 INFO [trainer.py:765] (2/8) Epoch 34, batch 2100, train_loss[loss=2.951, NarTop10Accuracy=0.7021, over 4062.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.644, over 5991.30 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (2/8) Epoch 34, batch 2200, train_loss[loss=3.511, NarTop10Accuracy=0.6218, over 7318.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6429, over 6028.68 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 14:31:25,978 INFO [trainer.py:765] (2/8) Epoch 34, batch 2300, train_loss[loss=3.056, NarTop10Accuracy=0.7107, over 5855.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 6075.56 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (2/8) Epoch 34, batch 2400, train_loss[loss=3.362, NarTop10Accuracy=0.6367, over 5107.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6404, over 5885.27 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (2/8) Epoch 34, batch 2500, train_loss[loss=3.35, NarTop10Accuracy=0.6445, over 5066.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6446, over 5540.65 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,481 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 14:33:26,336 INFO [trainer.py:765] (2/8) Epoch 35, batch 100, train_loss[loss=3.168, NarTop10Accuracy=0.6986, over 6960.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6593, over 2378.75 frames. 
], batch size: 30, lr: 2.44e-03 +2024-08-06 14:34:03,581 INFO [trainer.py:765] (2/8) Epoch 35, batch 200, train_loss[loss=3.288, NarTop10Accuracy=0.6677, over 6813.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6585, over 3886.14 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,186 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (2/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:34:24,110 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,665 INFO [trainer.py:765] (2/8) Epoch 35, batch 300, train_loss[loss=3.544, NarTop10Accuracy=0.6133, over 7019.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6592, over 4670.64 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,543 INFO [trainer.py:765] (2/8) Epoch 35, batch 400, train_loss[loss=3.159, NarTop10Accuracy=0.7033, over 5121.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6564, over 5113.26 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,187 INFO [trainer.py:765] (2/8) Epoch 35, batch 500, train_loss[loss=3.505, NarTop10Accuracy=0.6098, over 6248.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6562, over 5398.82 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,747 INFO [trainer.py:765] (2/8) Epoch 35, batch 600, train_loss[loss=3.154, NarTop10Accuracy=0.6911, over 5687.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6547, over 5674.29 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,827 INFO [trainer.py:765] (2/8) Epoch 35, batch 700, train_loss[loss=3.378, NarTop10Accuracy=0.6391, over 5002.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6544, over 5734.85 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,769 INFO [trainer.py:765] (2/8) Epoch 35, batch 800, train_loss[loss=3.089, NarTop10Accuracy=0.6941, over 5034.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6543, over 5795.39 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,304 INFO [trainer.py:765] (2/8) Epoch 35, batch 900, train_loss[loss=3.193, NarTop10Accuracy=0.6773, over 6242.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6545, over 5823.45 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:38:43,709 INFO [trainer.py:765] (2/8) Epoch 35, batch 1000, train_loss[loss=3.169, NarTop10Accuracy=0.673, over 6239.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6525, over 5928.07 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,567 INFO [trainer.py:765] (2/8) Epoch 35, batch 1100, train_loss[loss=3.434, NarTop10Accuracy=0.6256, over 6763.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6505, over 5957.24 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,838 INFO [trainer.py:765] (2/8) Epoch 35, batch 1200, train_loss[loss=3.288, NarTop10Accuracy=0.6612, over 6972.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6513, over 5956.03 frames. ], batch size: 30, lr: 2.43e-03 +2024-08-06 14:40:33,953 INFO [trainer.py:765] (2/8) Epoch 35, batch 1300, train_loss[loss=3.078, NarTop10Accuracy=0.7063, over 4956.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6509, over 6022.90 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,184 INFO [trainer.py:765] (2/8) Epoch 35, batch 1400, train_loss[loss=3.357, NarTop10Accuracy=0.6466, over 6064.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6476, over 6044.33 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,824 INFO [trainer.py:765] (2/8) Epoch 35, batch 1500, train_loss[loss=3.368, NarTop10Accuracy=0.6435, over 6894.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6477, over 5976.74 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (2/8) Epoch 35, batch 1600, train_loss[loss=3.559, NarTop10Accuracy=0.5978, over 7197.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6475, over 5936.74 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,467 INFO [trainer.py:765] (2/8) Epoch 35, batch 1700, train_loss[loss=3.345, NarTop10Accuracy=0.6616, over 6291.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6467, over 5922.26 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (2/8) Epoch 35, batch 1800, train_loss[loss=3.19, NarTop10Accuracy=0.676, over 7169.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6463, over 5991.02 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,648 INFO [trainer.py:765] (2/8) Epoch 35, batch 1900, train_loss[loss=3.409, NarTop10Accuracy=0.6432, over 5831.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6455, over 6029.94 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:43:47,367 INFO [trainer.py:765] (2/8) Epoch 35, batch 2000, train_loss[loss=3.493, NarTop10Accuracy=0.6217, over 6545.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6462, over 6014.54 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:44:12,857 INFO [trainer.py:765] (2/8) Epoch 35, batch 2100, train_loss[loss=3.192, NarTop10Accuracy=0.6842, over 4010.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6466, over 5998.16 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,389 INFO [trainer.py:765] (2/8) Epoch 35, batch 2200, train_loss[loss=3.534, NarTop10Accuracy=0.6226, over 7298.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6458, over 6047.44 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 14:44:47,200 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (2/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,442 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,099 INFO [trainer.py:765] (2/8) Epoch 35, batch 2300, train_loss[loss=3.111, NarTop10Accuracy=0.6949, over 5756.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6441, over 6070.50 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,819 INFO [trainer.py:765] (2/8) Epoch 35, batch 2400, train_loss[loss=3.086, NarTop10Accuracy=0.6912, over 5828.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6441, over 5881.43 frames. ], batch size: 8, lr: 2.41e-03 +2024-08-06 14:46:02,146 INFO [trainer.py:765] (2/8) Epoch 35, batch 2500, train_loss[loss=3.36, NarTop10Accuracy=0.6452, over 4127.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6496, over 5530.29 frames. ], batch size: 5, lr: 2.41e-03 +2024-08-06 14:46:23,387 INFO [trainer.py:650] (2/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (2/8) Epoch 36, batch 100, train_loss[loss=3.23, NarTop10Accuracy=0.6751, over 6967.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6594, over 2369.74 frames. ], batch size: 30, lr: 2.38e-03 +2024-08-06 14:47:58,357 INFO [trainer.py:765] (2/8) Epoch 36, batch 200, train_loss[loss=3.205, NarTop10Accuracy=0.6667, over 6866.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6626, over 3877.69 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (2/8) Epoch 36, batch 300, train_loss[loss=3.182, NarTop10Accuracy=0.687, over 7333.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6627, over 4676.60 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (2/8) Epoch 36, batch 400, train_loss[loss=2.971, NarTop10Accuracy=0.7224, over 5046.00 frames. ], tot_loss[loss=3.279, NarTop10Accuracy=0.6632, over 5152.63 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (2/8) Epoch 36, batch 500, train_loss[loss=3.496, NarTop10Accuracy=0.6207, over 6090.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6622, over 5433.61 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (2/8) Epoch 36, batch 600, train_loss[loss=3.241, NarTop10Accuracy=0.6785, over 5745.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6582, over 5699.50 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (2/8) Epoch 36, batch 700, train_loss[loss=3.275, NarTop10Accuracy=0.6722, over 5169.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6575, over 5756.21 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (2/8) Epoch 36, batch 800, train_loss[loss=3.376, NarTop10Accuracy=0.638, over 5016.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6569, over 5803.65 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (2/8) Epoch 36, batch 900, train_loss[loss=3.213, NarTop10Accuracy=0.6772, over 6388.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6571, over 5816.32 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,323 INFO [trainer.py:765] (2/8) Epoch 36, batch 1000, train_loss[loss=3.256, NarTop10Accuracy=0.6609, over 6258.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6549, over 5910.19 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (2/8) Epoch 36, batch 1100, train_loss[loss=2.974, NarTop10Accuracy=0.7194, over 6854.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6506, over 5949.68 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (2/8) Epoch 36, batch 1200, train_loss[loss=3.427, NarTop10Accuracy=0.6409, over 7380.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6529, over 5945.49 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (2/8) Epoch 36, batch 1300, train_loss[loss=2.989, NarTop10Accuracy=0.7198, over 5004.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6516, over 6008.08 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (2/8) Epoch 36, batch 1400, train_loss[loss=3.366, NarTop10Accuracy=0.6448, over 6169.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 6029.11 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (2/8) Epoch 36, batch 1500, train_loss[loss=3.603, NarTop10Accuracy=0.5922, over 6330.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6472, over 5973.20 frames. ], batch size: 53, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (2/8) Epoch 36, batch 1600, train_loss[loss=3.307, NarTop10Accuracy=0.6554, over 7195.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6474, over 5955.32 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,131 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (2/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,178 INFO [trainer.py:765] (2/8) Epoch 36, batch 1700, train_loss[loss=3.098, NarTop10Accuracy=0.7054, over 6169.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6498, over 5932.20 frames. ], batch size: 13, lr: 2.35e-03 +2024-08-06 14:56:53,759 INFO [trainer.py:765] (2/8) Epoch 36, batch 1800, train_loss[loss=3.485, NarTop10Accuracy=0.6171, over 7225.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6482, over 6006.38 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,336 INFO [trainer.py:765] (2/8) Epoch 36, batch 1900, train_loss[loss=3.535, NarTop10Accuracy=0.6158, over 6095.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6483, over 6053.86 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:57:46,056 INFO [trainer.py:765] (2/8) Epoch 36, batch 2000, train_loss[loss=3.64, NarTop10Accuracy=0.587, over 5738.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6458, over 6033.85 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (2/8) Epoch 36, batch 2100, train_loss[loss=3.033, NarTop10Accuracy=0.7027, over 4875.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6461, over 6026.45 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 14:58:36,832 INFO [trainer.py:765] (2/8) Epoch 36, batch 2200, train_loss[loss=3.716, NarTop10Accuracy=0.5719, over 7288.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.644, over 6050.11 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (2/8) Epoch 36, batch 2300, train_loss[loss=3.296, NarTop10Accuracy=0.6546, over 5951.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 6069.40 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (2/8) Epoch 36, batch 2400, train_loss[loss=3.432, NarTop10Accuracy=0.6305, over 5235.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6437, over 5883.71 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (2/8) Epoch 36, batch 2500, train_loss[loss=3.403, NarTop10Accuracy=0.6399, over 5049.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.649, over 5526.79 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,383 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (2/8) Epoch 37, batch 100, train_loss[loss=3.277, NarTop10Accuracy=0.6697, over 7159.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6644, over 2370.95 frames. 
], batch size: 30, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (2/8) Epoch 37, batch 200, train_loss[loss=3.167, NarTop10Accuracy=0.6936, over 6824.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6649, over 3875.30 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,382 INFO [trainer.py:765] (2/8) Epoch 37, batch 300, train_loss[loss=3.202, NarTop10Accuracy=0.6838, over 7154.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6626, over 4685.71 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (2/8) Epoch 37, batch 400, train_loss[loss=3.197, NarTop10Accuracy=0.6821, over 5092.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6599, over 5135.12 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (2/8) Epoch 37, batch 500, train_loss[loss=3.204, NarTop10Accuracy=0.6844, over 6219.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6612, over 5418.17 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (2/8) Epoch 37, batch 600, train_loss[loss=3.293, NarTop10Accuracy=0.6674, over 5771.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6598, over 5687.60 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,247 INFO [trainer.py:765] (2/8) Epoch 37, batch 700, train_loss[loss=2.939, NarTop10Accuracy=0.7305, over 4994.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6573, over 5754.67 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (2/8) Epoch 37, batch 800, train_loss[loss=3.433, NarTop10Accuracy=0.6384, over 5119.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6552, over 5818.71 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (2/8) Epoch 37, batch 900, train_loss[loss=3.186, NarTop10Accuracy=0.6777, over 6400.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6541, over 5833.22 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (2/8) Epoch 37, batch 1000, train_loss[loss=3.159, NarTop10Accuracy=0.6886, over 6176.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6524, over 5929.98 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (2/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,170 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (2/8) Epoch 37, batch 1100, train_loss[loss=3.577, NarTop10Accuracy=0.6076, over 6888.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6526, over 5957.11 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (2/8) Epoch 37, batch 1200, train_loss[loss=3.296, NarTop10Accuracy=0.6594, over 7216.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6533, over 5955.84 frames. ], batch size: 30, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (2/8) Epoch 37, batch 1300, train_loss[loss=3.694, NarTop10Accuracy=0.5839, over 5166.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6512, over 6021.41 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (2/8) Epoch 37, batch 1400, train_loss[loss=3.163, NarTop10Accuracy=0.6911, over 6030.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6484, over 6042.13 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (2/8) Epoch 37, batch 1500, train_loss[loss=3.629, NarTop10Accuracy=0.5933, over 6006.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6479, over 5963.13 frames. ], batch size: 51, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (2/8) Epoch 37, batch 1600, train_loss[loss=3.354, NarTop10Accuracy=0.6383, over 7109.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6478, over 5960.09 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (2/8) Epoch 37, batch 1700, train_loss[loss=3.279, NarTop10Accuracy=0.6679, over 6080.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6492, over 5948.43 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (2/8) Epoch 37, batch 1800, train_loss[loss=3.412, NarTop10Accuracy=0.6307, over 7215.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6511, over 6012.18 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (2/8) Epoch 37, batch 1900, train_loss[loss=3.531, NarTop10Accuracy=0.6082, over 6088.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.647, over 6047.32 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (2/8) Epoch 37, batch 2000, train_loss[loss=3.584, NarTop10Accuracy=0.5923, over 6298.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6473, over 6024.03 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (2/8) Epoch 37, batch 2100, train_loss[loss=3.294, NarTop10Accuracy=0.6773, over 3844.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6487, over 6004.21 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,312 INFO [trainer.py:765] (2/8) Epoch 37, batch 2200, train_loss[loss=3.271, NarTop10Accuracy=0.6668, over 7058.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 6047.43 frames. ], batch size: 30, lr: 2.28e-03 +2024-08-06 15:12:49,786 INFO [trainer.py:765] (2/8) Epoch 37, batch 2300, train_loss[loss=3.282, NarTop10Accuracy=0.6543, over 5802.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6477, over 6062.61 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (2/8) Epoch 37, batch 2400, train_loss[loss=3.17, NarTop10Accuracy=0.6772, over 5139.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6465, over 5876.37 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (2/8) Epoch 37, batch 2500, train_loss[loss=3.255, NarTop10Accuracy=0.6623, over 5203.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 5560.49 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,146 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (2/8) Epoch 38, batch 100, train_loss[loss=3.616, NarTop10Accuracy=0.6026, over 7423.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6563, over 2375.77 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 15:15:27,287 INFO [trainer.py:765] (2/8) Epoch 38, batch 200, train_loss[loss=3.299, NarTop10Accuracy=0.663, over 6890.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6603, over 3878.28 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,280 INFO [trainer.py:765] (2/8) Epoch 38, batch 300, train_loss[loss=3.385, NarTop10Accuracy=0.6491, over 7179.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6638, over 4679.67 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,595 INFO [trainer.py:765] (2/8) Epoch 38, batch 400, train_loss[loss=3.044, NarTop10Accuracy=0.7124, over 5223.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6627, over 5135.23 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,257 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (2/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,480 INFO [trainer.py:765] (2/8) Epoch 38, batch 500, train_loss[loss=3.37, NarTop10Accuracy=0.6464, over 6190.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6628, over 5405.93 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (2/8) Epoch 38, batch 600, train_loss[loss=3.192, NarTop10Accuracy=0.6887, over 5828.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6605, over 5677.70 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (2/8) Epoch 38, batch 700, train_loss[loss=3.078, NarTop10Accuracy=0.7081, over 4927.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6581, over 5751.03 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (2/8) Epoch 38, batch 800, train_loss[loss=3.175, NarTop10Accuracy=0.6829, over 5109.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6571, over 5808.89 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,540 INFO [trainer.py:765] (2/8) Epoch 38, batch 900, train_loss[loss=3.532, NarTop10Accuracy=0.6148, over 6277.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6575, over 5825.76 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (2/8) Epoch 38, batch 1000, train_loss[loss=3.392, NarTop10Accuracy=0.6443, over 6652.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6549, over 5924.23 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (2/8) Epoch 38, batch 1100, train_loss[loss=3.527, NarTop10Accuracy=0.6188, over 6872.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.653, over 5962.89 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (2/8) Epoch 38, batch 1200, train_loss[loss=3.511, NarTop10Accuracy=0.6156, over 7627.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6512, over 5972.35 frames. ], batch size: 30, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (2/8) Epoch 38, batch 1300, train_loss[loss=3.201, NarTop10Accuracy=0.6769, over 5141.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6529, over 6035.70 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (2/8) Epoch 38, batch 1400, train_loss[loss=3.179, NarTop10Accuracy=0.6825, over 6192.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6497, over 6051.09 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (2/8) Epoch 38, batch 1500, train_loss[loss=3.423, NarTop10Accuracy=0.6332, over 5476.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6465, over 5977.27 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (2/8) Epoch 38, batch 1600, train_loss[loss=3.301, NarTop10Accuracy=0.6568, over 7120.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6461, over 5959.03 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (2/8) Epoch 38, batch 1700, train_loss[loss=3.22, NarTop10Accuracy=0.6696, over 6239.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6464, over 5941.46 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,065 INFO [trainer.py:765] (2/8) Epoch 38, batch 1800, train_loss[loss=3.284, NarTop10Accuracy=0.6602, over 7149.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6459, over 6003.62 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,673 INFO [trainer.py:765] (2/8) Epoch 38, batch 1900, train_loss[loss=3.314, NarTop10Accuracy=0.66, over 5780.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6454, over 6051.74 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (2/8) Epoch 38, batch 2000, train_loss[loss=3.456, NarTop10Accuracy=0.6377, over 6086.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6473, over 6019.19 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 15:25:45,857 INFO [trainer.py:765] (2/8) Epoch 38, batch 2100, train_loss[loss=3.445, NarTop10Accuracy=0.6351, over 3967.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6475, over 5988.84 frames. ], batch size: 4, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (2/8) Epoch 38, batch 2200, train_loss[loss=3.63, NarTop10Accuracy=0.5885, over 7162.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 6039.70 frames. ], batch size: 30, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (2/8) Epoch 38, batch 2300, train_loss[loss=3.356, NarTop10Accuracy=0.6604, over 5882.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6457, over 6061.05 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,480 INFO [trainer.py:765] (2/8) Epoch 38, batch 2400, train_loss[loss=3.353, NarTop10Accuracy=0.6596, over 5086.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6437, over 5870.46 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:27:33,589 INFO [trainer.py:811] (2/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 15:27:34,075 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,514 INFO [trainer.py:765] (2/8) Epoch 38, batch 2500, train_loss[loss=3.405, NarTop10Accuracy=0.6496, over 5188.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6503, over 5545.19 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,726 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (2/8) Epoch 39, batch 100, train_loss[loss=3.281, NarTop10Accuracy=0.6554, over 7272.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6634, over 2388.01 frames. 
], batch size: 31, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (2/8) Epoch 39, batch 200, train_loss[loss=3.358, NarTop10Accuracy=0.6357, over 6978.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6637, over 3872.83 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,017 INFO [trainer.py:765] (2/8) Epoch 39, batch 300, train_loss[loss=3.213, NarTop10Accuracy=0.6736, over 7138.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.6629, over 4685.21 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (2/8) Epoch 39, batch 400, train_loss[loss=2.952, NarTop10Accuracy=0.7172, over 4998.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6605, over 5136.50 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (2/8) Epoch 39, batch 500, train_loss[loss=3.136, NarTop10Accuracy=0.6978, over 6089.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6606, over 5407.40 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (2/8) Epoch 39, batch 600, train_loss[loss=3.299, NarTop10Accuracy=0.6509, over 5779.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6596, over 5680.22 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (2/8) Epoch 39, batch 700, train_loss[loss=3.272, NarTop10Accuracy=0.6667, over 4239.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6587, over 5735.10 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 15:32:44,166 INFO [trainer.py:765] (2/8) Epoch 39, batch 800, train_loss[loss=3.266, NarTop10Accuracy=0.6645, over 5049.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6586, over 5801.51 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (2/8) Epoch 39, batch 900, train_loss[loss=3.146, NarTop10Accuracy=0.687, over 6185.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6574, over 5827.46 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (2/8) Epoch 39, batch 1000, train_loss[loss=3.109, NarTop10Accuracy=0.6929, over 6203.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6569, over 5932.33 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:33,094 INFO [trainer.py:765] (2/8) Epoch 39, batch 1100, train_loss[loss=3.262, NarTop10Accuracy=0.6597, over 6852.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6554, over 5962.98 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (2/8) Epoch 39, batch 1200, train_loss[loss=3.228, NarTop10Accuracy=0.6684, over 7512.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6558, over 5960.16 frames. ], batch size: 31, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (2/8) Epoch 39, batch 1300, train_loss[loss=3.496, NarTop10Accuracy=0.6245, over 5092.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6545, over 6014.65 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:36:18,851 INFO [trainer.py:765] (2/8) Epoch 39, batch 1400, train_loss[loss=3.292, NarTop10Accuracy=0.6703, over 6103.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6554, over 6020.44 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (2/8) Epoch 39, batch 1500, train_loss[loss=3.357, NarTop10Accuracy=0.6469, over 5868.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6503, over 5965.46 frames. 
], batch size: 49, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (2/8) Epoch 39, batch 1600, train_loss[loss=3.185, NarTop10Accuracy=0.6856, over 6966.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.65, over 5954.33 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (2/8) Epoch 39, batch 1700, train_loss[loss=3.203, NarTop10Accuracy=0.6665, over 6743.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6486, over 5928.65 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 15:38:08,510 INFO [trainer.py:765] (2/8) Epoch 39, batch 1800, train_loss[loss=3.162, NarTop10Accuracy=0.6869, over 7099.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6488, over 6004.48 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (2/8) Epoch 39, batch 1900, train_loss[loss=3.488, NarTop10Accuracy=0.6264, over 6191.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6443, over 6049.45 frames. ], batch size: 48, lr: 2.17e-03 +2024-08-06 15:38:37,990 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (2/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,263 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,226 INFO [trainer.py:765] (2/8) Epoch 39, batch 2000, train_loss[loss=3.352, NarTop10Accuracy=0.6406, over 6092.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6489, over 6005.27 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (2/8) Epoch 39, batch 2100, train_loss[loss=3.611, NarTop10Accuracy=0.5883, over 3902.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.65, over 5974.19 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (2/8) Epoch 39, batch 2200, train_loss[loss=3.587, NarTop10Accuracy=0.6019, over 7303.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6505, over 6020.48 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (2/8) Epoch 39, batch 2300, train_loss[loss=3.135, NarTop10Accuracy=0.6868, over 5821.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6505, over 6063.04 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (2/8) Epoch 39, batch 2400, train_loss[loss=3.391, NarTop10Accuracy=0.6509, over 5064.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6496, over 5878.94 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (2/8) Epoch 39, batch 2500, train_loss[loss=3.767, NarTop10Accuracy=0.5728, over 5093.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6527, over 5537.62 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:36,910 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (2/8) Epoch 40, batch 100, train_loss[loss=3.559, NarTop10Accuracy=0.5999, over 6889.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6587, over 2382.94 frames. ], batch size: 30, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (2/8) Epoch 40, batch 200, train_loss[loss=3.406, NarTop10Accuracy=0.6307, over 6665.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6629, over 3882.77 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (2/8) Epoch 40, batch 300, train_loss[loss=3.328, NarTop10Accuracy=0.6541, over 7052.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6623, over 4683.13 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:44:18,202 INFO [trainer.py:765] (2/8) Epoch 40, batch 400, train_loss[loss=3.215, NarTop10Accuracy=0.6739, over 5141.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6628, over 5133.78 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (2/8) Epoch 40, batch 500, train_loss[loss=3.283, NarTop10Accuracy=0.657, over 6125.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6646, over 5406.50 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (2/8) Epoch 40, batch 600, train_loss[loss=3.632, NarTop10Accuracy=0.5839, over 5856.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6606, over 5669.31 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (2/8) Epoch 40, batch 700, train_loss[loss=3.361, NarTop10Accuracy=0.6392, over 5020.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6573, over 5719.59 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (2/8) Epoch 40, batch 800, train_loss[loss=3.126, NarTop10Accuracy=0.698, over 5148.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6559, over 5791.58 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (2/8) Epoch 40, batch 900, train_loss[loss=3.137, NarTop10Accuracy=0.7042, over 6326.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6563, over 5801.17 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (2/8) Epoch 40, batch 1000, train_loss[loss=3.595, NarTop10Accuracy=0.5953, over 6216.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6551, over 5921.31 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,710 INFO [trainer.py:765] (2/8) Epoch 40, batch 1100, train_loss[loss=3.415, NarTop10Accuracy=0.6248, over 6876.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6542, over 5948.47 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (2/8) Epoch 40, batch 1200, train_loss[loss=3.424, NarTop10Accuracy=0.6304, over 7043.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6545, over 5947.39 frames. ], batch size: 30, lr: 2.12e-03 +2024-08-06 15:49:29,783 INFO [trainer.py:765] (2/8) Epoch 40, batch 1300, train_loss[loss=3.334, NarTop10Accuracy=0.6487, over 5093.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6542, over 6016.00 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,245 INFO [trainer.py:803] (2/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (2/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (2/8) Maximum memory allocated so far is 29813MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (2/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (2/8) Epoch 40, batch 1400, train_loss[loss=3.129, NarTop10Accuracy=0.6899, over 6064.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6518, over 6043.44 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (2/8) Epoch 40, batch 1500, train_loss[loss=3.529, NarTop10Accuracy=0.61, over 6258.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6534, over 5985.98 frames. ], batch size: 49, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (2/8) Epoch 40, batch 1600, train_loss[loss=3.366, NarTop10Accuracy=0.6568, over 6992.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6545, over 5953.70 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (2/8) Epoch 40, batch 1700, train_loss[loss=3.424, NarTop10Accuracy=0.6372, over 6292.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6541, over 5934.84 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (2/8) Epoch 40, batch 1800, train_loss[loss=3.552, NarTop10Accuracy=0.6094, over 7299.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6518, over 6007.79 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (2/8) Epoch 40, batch 1900, train_loss[loss=3.44, NarTop10Accuracy=0.6362, over 6141.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6515, over 6047.42 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (2/8) Epoch 40, batch 2000, train_loss[loss=3.338, NarTop10Accuracy=0.6496, over 6358.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6518, over 5986.29 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (2/8) Epoch 40, batch 2100, train_loss[loss=3.301, NarTop10Accuracy=0.659, over 4899.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6515, over 5980.84 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (2/8) Epoch 40, batch 2200, train_loss[loss=3.286, NarTop10Accuracy=0.665, over 7071.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6522, over 6023.94 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (2/8) Epoch 40, batch 2300, train_loss[loss=3.23, NarTop10Accuracy=0.6704, over 5776.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6494, over 6054.93 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (2/8) Epoch 40, batch 2400, train_loss[loss=3.507, NarTop10Accuracy=0.6342, over 5071.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6474, over 5882.20 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (2/8) Epoch 40, batch 2500, train_loss[loss=3.209, NarTop10Accuracy=0.6776, over 4926.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6535, over 5546.50 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,460 INFO [trainer.py:650] (2/8) Reaches end of dataloader. +2024-08-06 15:55:28,462 INFO [trainer.py:1069] (2/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-3 b/libritts/log/log-train-2024-08-06-06-41-41-3 new file mode 100644 index 0000000000000000000000000000000000000000..5f857728da6b06c4ac3d1fe49544b6b5fbacf2c5 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-3 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,437 INFO [trainer.py:870] (3/8) Training started +2024-08-06 06:41:41,438 INFO [trainer.py:889] (3/8) Device: cuda:3 +2024-08-06 06:41:41,438 INFO [trainer.py:890] (3/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,439 INFO [trainer.py:892] (3/8) About to create model +2024-08-06 06:41:42,189 INFO [trainer.py:899] (3/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,189 INFO [checkpoint.py:112] (3/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:43,987 INFO [trainer.py:914] (3/8) Using DDP +2024-08-06 06:41:46,902 INFO [datamodule.py:427] (3/8) About to get train cuts +2024-08-06 06:41:46,904 INFO [datamodule.py:434] (3/8) About to get dev cuts +2024-08-06 06:41:46,905 INFO [datamodule.py:292] (3/8) Disable SpecAugment +2024-08-06 06:41:46,905 INFO [datamodule.py:294] (3/8) About to create train dataset +2024-08-06 06:41:46,906 INFO [datamodule.py:323] (3/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,523 INFO [datamodule.py:344] (3/8) About to create train dataloader +2024-08-06 06:41:47,523 INFO [datamodule.py:367] (3/8) 
About to create dev dataset +2024-08-06 06:41:47,856 INFO [datamodule.py:388] (3/8) About to create dev dataloader +2024-08-06 06:42:36,135 INFO [trainer.py:765] (3/8) Epoch 1, batch 100, train_loss[loss=96.66, NarTop10Accuracy=0.01557, over 7428.00 frames. ], tot_loss[loss=80.13, NarTop10Accuracy=0.05222, over 2381.78 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,817 INFO [trainer.py:765] (3/8) Epoch 1, batch 200, train_loss[loss=122, NarTop10Accuracy=0.02196, over 6851.00 frames. ], tot_loss[loss=99.16, NarTop10Accuracy=0.04432, over 3878.67 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,849 INFO [trainer.py:765] (3/8) Epoch 1, batch 300, train_loss[loss=74.74, NarTop10Accuracy=0.02407, over 7200.00 frames. ], tot_loss[loss=87.02, NarTop10Accuracy=0.04652, over 4678.55 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,250 INFO [trainer.py:765] (3/8) Epoch 1, batch 400, train_loss[loss=31.75, NarTop10Accuracy=0.05369, over 5095.00 frames. ], tot_loss[loss=67.86, NarTop10Accuracy=0.04975, over 5144.26 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,444 INFO [trainer.py:765] (3/8) Epoch 1, batch 500, train_loss[loss=17.56, NarTop10Accuracy=0.02652, over 6216.00 frames. ], tot_loss[loss=48.74, NarTop10Accuracy=0.0551, over 5419.86 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,923 INFO [trainer.py:765] (3/8) Epoch 1, batch 600, train_loss[loss=6.068, NarTop10Accuracy=0.1563, over 5801.00 frames. ], tot_loss[loss=33.38, NarTop10Accuracy=0.06183, over 5680.36 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,481 INFO [trainer.py:765] (3/8) Epoch 1, batch 700, train_loss[loss=7.05, NarTop10Accuracy=0.1144, over 5125.00 frames. ], tot_loss[loss=23.59, NarTop10Accuracy=0.06892, over 5746.87 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,664 INFO [trainer.py:765] (3/8) Epoch 1, batch 800, train_loss[loss=6.529, NarTop10Accuracy=0.1338, over 5027.00 frames. ], tot_loss[loss=17.55, NarTop10Accuracy=0.08046, over 5798.47 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,732 INFO [trainer.py:765] (3/8) Epoch 1, batch 900, train_loss[loss=5.914, NarTop10Accuracy=0.1901, over 6285.00 frames. ], tot_loss[loss=13.05, NarTop10Accuracy=0.1097, over 5823.63 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,909 INFO [trainer.py:765] (3/8) Epoch 1, batch 1000, train_loss[loss=5.797, NarTop10Accuracy=0.1954, over 6221.00 frames. ], tot_loss[loss=10.2, NarTop10Accuracy=0.1349, over 5913.85 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,140 INFO [trainer.py:765] (3/8) Epoch 1, batch 1100, train_loss[loss=5.417, NarTop10Accuracy=0.217, over 7229.00 frames. ], tot_loss[loss=8.439, NarTop10Accuracy=0.1545, over 5944.60 frames. ], batch size: 18, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (3/8) Epoch 1, batch 1200, train_loss[loss=6.188, NarTop10Accuracy=0.155, over 7236.00 frames. ], tot_loss[loss=7.317, NarTop10Accuracy=0.1731, over 5959.13 frames. ], batch size: 30, lr: 2.96e-02 +2024-08-06 06:48:47,234 INFO [trainer.py:765] (3/8) Epoch 1, batch 1300, train_loss[loss=5.47, NarTop10Accuracy=0.212, over 4404.00 frames. ], tot_loss[loss=6.621, NarTop10Accuracy=0.1844, over 6025.38 frames. ], batch size: 5, lr: 2.95e-02 +2024-08-06 06:49:23,566 INFO [trainer.py:765] (3/8) Epoch 1, batch 1400, train_loss[loss=5.516, NarTop10Accuracy=0.2009, over 6195.00 frames. ], tot_loss[loss=6.201, NarTop10Accuracy=0.1913, over 6030.65 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,506 INFO [trainer.py:765] (3/8) Epoch 1, batch 1500, train_loss[loss=5.557, NarTop10Accuracy=0.2071, over 5661.00 frames. ], tot_loss[loss=5.933, NarTop10Accuracy=0.1983, over 5960.28 frames. ], batch size: 49, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (3/8) Epoch 1, batch 1600, train_loss[loss=5.624, NarTop10Accuracy=0.1886, over 7071.00 frames. ], tot_loss[loss=5.763, NarTop10Accuracy=0.203, over 5947.94 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,597 INFO [trainer.py:765] (3/8) Epoch 1, batch 1700, train_loss[loss=5.354, NarTop10Accuracy=0.2294, over 6665.00 frames. ], tot_loss[loss=5.641, NarTop10Accuracy=0.2089, over 5941.52 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 06:51:11,955 INFO [trainer.py:765] (3/8) Epoch 1, batch 1800, train_loss[loss=5.561, NarTop10Accuracy=0.2006, over 7071.00 frames. ], tot_loss[loss=5.549, NarTop10Accuracy=0.2158, over 6013.89 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,224 INFO [trainer.py:765] (3/8) Epoch 1, batch 1900, train_loss[loss=5.612, NarTop10Accuracy=0.1892, over 5924.00 frames. ], tot_loss[loss=5.495, NarTop10Accuracy=0.2206, over 6040.46 frames. ], batch size: 49, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (3/8) Epoch 1, batch 2000, train_loss[loss=5.419, NarTop10Accuracy=0.2347, over 5992.00 frames. ], tot_loss[loss=5.443, NarTop10Accuracy=0.2271, over 6020.03 frames. ], batch size: 50, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (3/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 26412MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,586 INFO [trainer.py:765] (3/8) Epoch 1, batch 2100, train_loss[loss=5.356, NarTop10Accuracy=0.2512, over 4736.00 frames. ], tot_loss[loss=5.393, NarTop10Accuracy=0.2356, over 6004.95 frames. ], batch size: 5, lr: 2.88e-02 +2024-08-06 06:53:05,355 INFO [trainer.py:765] (3/8) Epoch 1, batch 2200, train_loss[loss=5.457, NarTop10Accuracy=0.2211, over 7214.00 frames. ], tot_loss[loss=5.363, NarTop10Accuracy=0.2397, over 6040.48 frames. ], batch size: 30, lr: 2.87e-02 +2024-08-06 06:53:30,701 INFO [trainer.py:765] (3/8) Epoch 1, batch 2300, train_loss[loss=5.34, NarTop10Accuracy=0.2323, over 5664.00 frames. ], tot_loss[loss=5.344, NarTop10Accuracy=0.243, over 6071.77 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,359 INFO [trainer.py:765] (3/8) Epoch 1, batch 2400, train_loss[loss=5.205, NarTop10Accuracy=0.2604, over 5039.00 frames. ], tot_loss[loss=5.31, NarTop10Accuracy=0.2502, over 5877.76 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (3/8) Epoch 1, batch 2500, train_loss[loss=4.996, NarTop10Accuracy=0.3112, over 5151.00 frames. ], tot_loss[loss=5.262, NarTop10Accuracy=0.2589, over 5536.39 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:39,604 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 06:55:37,936 INFO [trainer.py:765] (3/8) Epoch 2, batch 100, train_loss[loss=4.945, NarTop10Accuracy=0.3135, over 7135.00 frames. ], tot_loss[loss=5.185, NarTop10Accuracy=0.2778, over 2377.31 frames. 
], batch size: 31, lr: 2.77e-02 +2024-08-06 06:56:16,405 INFO [trainer.py:765] (3/8) Epoch 2, batch 200, train_loss[loss=5.018, NarTop10Accuracy=0.3149, over 6785.00 frames. ], tot_loss[loss=5.169, NarTop10Accuracy=0.2808, over 3882.82 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,973 INFO [trainer.py:765] (3/8) Epoch 2, batch 300, train_loss[loss=5.143, NarTop10Accuracy=0.29, over 7301.00 frames. ], tot_loss[loss=5.146, NarTop10Accuracy=0.2856, over 4710.73 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,938 INFO [trainer.py:765] (3/8) Epoch 2, batch 400, train_loss[loss=5.533, NarTop10Accuracy=0.1992, over 5115.00 frames. ], tot_loss[loss=5.131, NarTop10Accuracy=0.2886, over 5133.46 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (3/8) Epoch 2, batch 500, train_loss[loss=4.865, NarTop10Accuracy=0.3387, over 6160.00 frames. ], tot_loss[loss=5.101, NarTop10Accuracy=0.2948, over 5410.41 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,426 INFO [trainer.py:765] (3/8) Epoch 2, batch 600, train_loss[loss=5.09, NarTop10Accuracy=0.3033, over 5804.00 frames. ], tot_loss[loss=5.086, NarTop10Accuracy=0.2979, over 5701.95 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (3/8) Epoch 2, batch 700, train_loss[loss=4.651, NarTop10Accuracy=0.3723, over 4987.00 frames. ], tot_loss[loss=5.082, NarTop10Accuracy=0.2986, over 5767.39 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (3/8) Epoch 2, batch 800, train_loss[loss=5.118, NarTop10Accuracy=0.2886, over 5103.00 frames. ], tot_loss[loss=5.086, NarTop10Accuracy=0.2977, over 5804.66 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,183 INFO [trainer.py:765] (3/8) Epoch 2, batch 900, train_loss[loss=5.403, NarTop10Accuracy=0.2242, over 6215.00 frames. ], tot_loss[loss=5.049, NarTop10Accuracy=0.3043, over 5821.60 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,142 INFO [trainer.py:765] (3/8) Epoch 2, batch 1000, train_loss[loss=5.008, NarTop10Accuracy=0.3061, over 6584.00 frames. ], tot_loss[loss=5.013, NarTop10Accuracy=0.3111, over 5931.77 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 07:01:05,573 INFO [trainer.py:765] (3/8) Epoch 2, batch 1100, train_loss[loss=4.947, NarTop10Accuracy=0.3291, over 6813.00 frames. ], tot_loss[loss=5.015, NarTop10Accuracy=0.3114, over 5956.31 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (3/8) Epoch 2, batch 1200, train_loss[loss=4.88, NarTop10Accuracy=0.3322, over 7511.00 frames. ], tot_loss[loss=4.995, NarTop10Accuracy=0.3141, over 5952.79 frames. ], batch size: 31, lr: 2.64e-02 +2024-08-06 07:02:15,645 INFO [trainer.py:765] (3/8) Epoch 2, batch 1300, train_loss[loss=5.385, NarTop10Accuracy=0.2204, over 5052.00 frames. ], tot_loss[loss=4.957, NarTop10Accuracy=0.3217, over 6023.34 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,253 INFO [trainer.py:765] (3/8) Epoch 2, batch 1400, train_loss[loss=4.669, NarTop10Accuracy=0.3881, over 6192.00 frames. ], tot_loss[loss=4.939, NarTop10Accuracy=0.3247, over 6036.98 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,268 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (3/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 26548MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (3/8) Epoch 2, batch 1500, train_loss[loss=5.041, NarTop10Accuracy=0.3091, over 6342.00 frames. ], tot_loss[loss=4.922, NarTop10Accuracy=0.3274, over 5972.09 frames. ], batch size: 48, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (3/8) Epoch 2, batch 1600, train_loss[loss=4.897, NarTop10Accuracy=0.3433, over 6963.00 frames. ], tot_loss[loss=4.899, NarTop10Accuracy=0.3324, over 5945.76 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (3/8) Epoch 2, batch 1700, train_loss[loss=4.941, NarTop10Accuracy=0.3314, over 6244.00 frames. ], tot_loss[loss=4.895, NarTop10Accuracy=0.3341, over 5928.40 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (3/8) Epoch 2, batch 1800, train_loss[loss=4.787, NarTop10Accuracy=0.3621, over 7043.00 frames. ], tot_loss[loss=4.881, NarTop10Accuracy=0.3364, over 6000.95 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,586 INFO [trainer.py:765] (3/8) Epoch 2, batch 1900, train_loss[loss=4.881, NarTop10Accuracy=0.34, over 5822.00 frames. ], tot_loss[loss=4.857, NarTop10Accuracy=0.3416, over 6029.73 frames. ], batch size: 49, lr: 2.55e-02 +2024-08-06 07:05:39,286 INFO [trainer.py:765] (3/8) Epoch 2, batch 2000, train_loss[loss=4.883, NarTop10Accuracy=0.3358, over 6064.00 frames. ], tot_loss[loss=4.837, NarTop10Accuracy=0.3455, over 6011.12 frames. ], batch size: 48, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (3/8) Epoch 2, batch 2100, train_loss[loss=4.532, NarTop10Accuracy=0.3858, over 4819.00 frames. ], tot_loss[loss=4.841, NarTop10Accuracy=0.345, over 6024.76 frames. ], batch size: 5, lr: 2.52e-02 +2024-08-06 07:06:30,373 INFO [trainer.py:765] (3/8) Epoch 2, batch 2200, train_loss[loss=4.642, NarTop10Accuracy=0.3867, over 7269.00 frames. ], tot_loss[loss=4.801, NarTop10Accuracy=0.353, over 6059.12 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,875 INFO [trainer.py:765] (3/8) Epoch 2, batch 2300, train_loss[loss=4.445, NarTop10Accuracy=0.4219, over 5820.00 frames. ], tot_loss[loss=4.801, NarTop10Accuracy=0.3536, over 6082.73 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (3/8) Epoch 2, batch 2400, train_loss[loss=4.914, NarTop10Accuracy=0.3288, over 5197.00 frames. ], tot_loss[loss=4.774, NarTop10Accuracy=0.3595, over 5885.66 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (3/8) Epoch 2, batch 2500, train_loss[loss=4.736, NarTop10Accuracy=0.3633, over 5030.00 frames. ], tot_loss[loss=4.748, NarTop10Accuracy=0.3645, over 5527.57 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:07,983 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 07:09:08,538 INFO [trainer.py:765] (3/8) Epoch 3, batch 100, train_loss[loss=4.934, NarTop10Accuracy=0.3321, over 7167.00 frames. ], tot_loss[loss=4.64, NarTop10Accuracy=0.3875, over 2375.75 frames. ], batch size: 31, lr: 2.35e-02 +2024-08-06 07:09:41,500 INFO [trainer.py:765] (3/8) Epoch 3, batch 200, train_loss[loss=4.368, NarTop10Accuracy=0.4485, over 6886.00 frames. ], tot_loss[loss=4.611, NarTop10Accuracy=0.3924, over 3878.92 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (3/8) Epoch 3, batch 300, train_loss[loss=4.436, NarTop10Accuracy=0.4266, over 7062.00 frames. ], tot_loss[loss=4.602, NarTop10Accuracy=0.3938, over 4683.75 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (3/8) Epoch 3, batch 400, train_loss[loss=4.61, NarTop10Accuracy=0.3976, over 4978.00 frames. ], tot_loss[loss=4.576, NarTop10Accuracy=0.3989, over 5148.33 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (3/8) Epoch 3, batch 500, train_loss[loss=4.744, NarTop10Accuracy=0.3605, over 6153.00 frames. ], tot_loss[loss=4.584, NarTop10Accuracy=0.3973, over 5417.65 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,263 INFO [trainer.py:765] (3/8) Epoch 3, batch 600, train_loss[loss=4.428, NarTop10Accuracy=0.4307, over 5794.00 frames. ], tot_loss[loss=4.571, NarTop10Accuracy=0.4001, over 5682.66 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (3/8) Epoch 3, batch 700, train_loss[loss=4.591, NarTop10Accuracy=0.402, over 5186.00 frames. ], tot_loss[loss=4.558, NarTop10Accuracy=0.4025, over 5755.03 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (3/8) Epoch 3, batch 800, train_loss[loss=4.456, NarTop10Accuracy=0.4341, over 5146.00 frames. ], tot_loss[loss=4.539, NarTop10Accuracy=0.4063, over 5787.10 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (3/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27325MB +2024-08-06 07:13:23,430 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (3/8) Epoch 3, batch 900, train_loss[loss=4.446, NarTop10Accuracy=0.4326, over 6764.00 frames. ], tot_loss[loss=4.513, NarTop10Accuracy=0.4104, over 5819.70 frames. ], batch size: 14, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (3/8) Epoch 3, batch 1000, train_loss[loss=4.308, NarTop10Accuracy=0.4409, over 6228.00 frames. ], tot_loss[loss=4.493, NarTop10Accuracy=0.4138, over 5896.13 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (3/8) Epoch 3, batch 1100, train_loss[loss=4.404, NarTop10Accuracy=0.4253, over 6789.00 frames. ], tot_loss[loss=4.484, NarTop10Accuracy=0.4151, over 5939.80 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,866 INFO [trainer.py:765] (3/8) Epoch 3, batch 1200, train_loss[loss=4.359, NarTop10Accuracy=0.4392, over 7170.00 frames. ], tot_loss[loss=4.47, NarTop10Accuracy=0.418, over 5957.27 frames. ], batch size: 30, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (3/8) Epoch 3, batch 1300, train_loss[loss=4.302, NarTop10Accuracy=0.4707, over 5042.00 frames. ], tot_loss[loss=4.462, NarTop10Accuracy=0.42, over 6037.69 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (3/8) Epoch 3, batch 1400, train_loss[loss=4.222, NarTop10Accuracy=0.4613, over 6140.00 frames. ], tot_loss[loss=4.46, NarTop10Accuracy=0.4209, over 6045.55 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,663 INFO [trainer.py:765] (3/8) Epoch 3, batch 1500, train_loss[loss=4.652, NarTop10Accuracy=0.3781, over 6087.00 frames. ], tot_loss[loss=4.443, NarTop10Accuracy=0.4241, over 5981.77 frames. ], batch size: 49, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (3/8) Epoch 3, batch 1600, train_loss[loss=4.093, NarTop10Accuracy=0.4931, over 7275.00 frames. ], tot_loss[loss=4.424, NarTop10Accuracy=0.4277, over 5961.03 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,505 INFO [trainer.py:765] (3/8) Epoch 3, batch 1700, train_loss[loss=4.046, NarTop10Accuracy=0.494, over 6242.00 frames. ], tot_loss[loss=4.395, NarTop10Accuracy=0.433, over 5951.58 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,161 INFO [trainer.py:765] (3/8) Epoch 3, batch 1800, train_loss[loss=4.342, NarTop10Accuracy=0.4392, over 7112.00 frames. ], tot_loss[loss=4.387, NarTop10Accuracy=0.4348, over 6014.46 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,958 INFO [trainer.py:765] (3/8) Epoch 3, batch 1900, train_loss[loss=4.621, NarTop10Accuracy=0.3858, over 6237.00 frames. ], tot_loss[loss=4.377, NarTop10Accuracy=0.4369, over 6054.26 frames. ], batch size: 48, lr: 2.16e-02 +2024-08-06 07:19:27,622 INFO [trainer.py:765] (3/8) Epoch 3, batch 2000, train_loss[loss=4.431, NarTop10Accuracy=0.4209, over 5740.00 frames. ], tot_loss[loss=4.355, NarTop10Accuracy=0.441, over 6024.03 frames. ], batch size: 49, lr: 2.15e-02 +2024-08-06 07:19:53,070 INFO [trainer.py:765] (3/8) Epoch 3, batch 2100, train_loss[loss=4.124, NarTop10Accuracy=0.4874, over 4783.00 frames. ], tot_loss[loss=4.33, NarTop10Accuracy=0.4458, over 6011.43 frames. ], batch size: 5, lr: 2.14e-02 +2024-08-06 07:20:18,554 INFO [trainer.py:765] (3/8) Epoch 3, batch 2200, train_loss[loss=4.699, NarTop10Accuracy=0.3752, over 7022.00 frames. ], tot_loss[loss=4.314, NarTop10Accuracy=0.4491, over 6058.28 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (3/8) Epoch 3, batch 2300, train_loss[loss=4.04, NarTop10Accuracy=0.5024, over 5916.00 frames. ], tot_loss[loss=4.322, NarTop10Accuracy=0.4481, over 6077.71 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,678 INFO [trainer.py:765] (3/8) Epoch 3, batch 2400, train_loss[loss=4.094, NarTop10Accuracy=0.4964, over 5318.00 frames. ], tot_loss[loss=4.312, NarTop10Accuracy=0.4497, over 5871.56 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (3/8) Epoch 3, batch 2500, train_loss[loss=4.275, NarTop10Accuracy=0.4517, over 5047.00 frames. ], tot_loss[loss=4.257, NarTop10Accuracy=0.4602, over 5538.48 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,112 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (3/8) Epoch 4, batch 100, train_loss[loss=4.105, NarTop10Accuracy=0.4864, over 7208.00 frames. ], tot_loss[loss=4.211, NarTop10Accuracy=0.4711, over 2386.68 frames. ], batch size: 30, lr: 1.97e-02 +2024-08-06 07:23:33,303 INFO [trainer.py:765] (3/8) Epoch 4, batch 200, train_loss[loss=4.173, NarTop10Accuracy=0.4794, over 6958.00 frames. ], tot_loss[loss=4.183, NarTop10Accuracy=0.4764, over 3876.49 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27325MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,361 INFO [trainer.py:765] (3/8) Epoch 4, batch 300, train_loss[loss=4.01, NarTop10Accuracy=0.5177, over 7124.00 frames. ], tot_loss[loss=4.171, NarTop10Accuracy=0.4792, over 4685.45 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (3/8) Epoch 4, batch 400, train_loss[loss=3.941, NarTop10Accuracy=0.5277, over 5202.00 frames. ], tot_loss[loss=4.17, NarTop10Accuracy=0.479, over 5144.21 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (3/8) Epoch 4, batch 500, train_loss[loss=4.07, NarTop10Accuracy=0.4993, over 6178.00 frames. ], tot_loss[loss=4.163, NarTop10Accuracy=0.4804, over 5435.78 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (3/8) Epoch 4, batch 600, train_loss[loss=4.349, NarTop10Accuracy=0.4486, over 5753.00 frames. ], tot_loss[loss=4.154, NarTop10Accuracy=0.482, over 5671.03 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (3/8) Epoch 4, batch 700, train_loss[loss=4.097, NarTop10Accuracy=0.5067, over 4999.00 frames. ], tot_loss[loss=4.151, NarTop10Accuracy=0.4828, over 5742.08 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,433 INFO [trainer.py:765] (3/8) Epoch 4, batch 800, train_loss[loss=4.011, NarTop10Accuracy=0.5109, over 4897.00 frames. ], tot_loss[loss=4.14, NarTop10Accuracy=0.485, over 5790.80 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,042 INFO [trainer.py:765] (3/8) Epoch 4, batch 900, train_loss[loss=4.337, NarTop10Accuracy=0.4557, over 6205.00 frames. ], tot_loss[loss=4.115, NarTop10Accuracy=0.4897, over 5811.80 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (3/8) Epoch 4, batch 1000, train_loss[loss=4.027, NarTop10Accuracy=0.5033, over 6223.00 frames. ], tot_loss[loss=4.1, NarTop10Accuracy=0.4924, over 5922.77 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,071 INFO [trainer.py:765] (3/8) Epoch 4, batch 1100, train_loss[loss=4.043, NarTop10Accuracy=0.5005, over 6884.00 frames. ], tot_loss[loss=4.117, NarTop10Accuracy=0.4894, over 5976.43 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,599 INFO [trainer.py:765] (3/8) Epoch 4, batch 1200, train_loss[loss=4.182, NarTop10Accuracy=0.4815, over 7489.00 frames. ], tot_loss[loss=4.109, NarTop10Accuracy=0.4905, over 5964.79 frames. ], batch size: 31, lr: 1.87e-02 +2024-08-06 07:30:04,991 INFO [trainer.py:765] (3/8) Epoch 4, batch 1300, train_loss[loss=4.018, NarTop10Accuracy=0.5136, over 5057.00 frames. ], tot_loss[loss=4.078, NarTop10Accuracy=0.4968, over 6044.48 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,379 INFO [trainer.py:765] (3/8) Epoch 4, batch 1400, train_loss[loss=4.083, NarTop10Accuracy=0.4973, over 6183.00 frames. ], tot_loss[loss=4.076, NarTop10Accuracy=0.4973, over 6059.49 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,831 INFO [trainer.py:765] (3/8) Epoch 4, batch 1500, train_loss[loss=4.118, NarTop10Accuracy=0.4935, over 5927.00 frames. ], tot_loss[loss=4.069, NarTop10Accuracy=0.4988, over 5965.73 frames. 
], batch size: 49, lr: 1.85e-02 +2024-08-06 07:31:39,961 INFO [trainer.py:765] (3/8) Epoch 4, batch 1600, train_loss[loss=4.189, NarTop10Accuracy=0.4784, over 7184.00 frames. ], tot_loss[loss=4.078, NarTop10Accuracy=0.4974, over 5951.97 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (3/8) Epoch 4, batch 1700, train_loss[loss=4.605, NarTop10Accuracy=0.3983, over 6214.00 frames. ], tot_loss[loss=4.055, NarTop10Accuracy=0.5017, over 5921.29 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (3/8) Epoch 4, batch 1800, train_loss[loss=4.239, NarTop10Accuracy=0.4628, over 7143.00 frames. ], tot_loss[loss=4.049, NarTop10Accuracy=0.5027, over 6000.61 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,195 INFO [trainer.py:765] (3/8) Epoch 4, batch 1900, train_loss[loss=4.229, NarTop10Accuracy=0.4676, over 5870.00 frames. ], tot_loss[loss=4.055, NarTop10Accuracy=0.5017, over 6060.01 frames. ], batch size: 49, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (3/8) Epoch 4, batch 2000, train_loss[loss=4.469, NarTop10Accuracy=0.4207, over 6193.00 frames. ], tot_loss[loss=4.043, NarTop10Accuracy=0.504, over 6028.56 frames. ], batch size: 49, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (3/8) Epoch 4, batch 2100, train_loss[loss=3.991, NarTop10Accuracy=0.5148, over 4851.00 frames. ], tot_loss[loss=4.039, NarTop10Accuracy=0.5056, over 6004.92 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (3/8) Epoch 4, batch 2200, train_loss[loss=4.081, NarTop10Accuracy=0.5074, over 7336.00 frames. ], tot_loss[loss=4.037, NarTop10Accuracy=0.5064, over 6039.17 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 07:34:31,431 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (3/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27325MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (3/8) Epoch 4, batch 2300, train_loss[loss=3.933, NarTop10Accuracy=0.5236, over 5670.00 frames. ], tot_loss[loss=4.034, NarTop10Accuracy=0.5069, over 6065.45 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,167 INFO [trainer.py:765] (3/8) Epoch 4, batch 2400, train_loss[loss=3.848, NarTop10Accuracy=0.5533, over 5170.00 frames. ], tot_loss[loss=4.03, NarTop10Accuracy=0.5082, over 5868.56 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,622 INFO [trainer.py:765] (3/8) Epoch 4, batch 2500, train_loss[loss=4.281, NarTop10Accuracy=0.4478, over 4977.00 frames. ], tot_loss[loss=4.012, NarTop10Accuracy=0.5114, over 5510.86 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,724 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (3/8) Epoch 5, batch 100, train_loss[loss=3.875, NarTop10Accuracy=0.5335, over 7082.00 frames. ], tot_loss[loss=3.949, NarTop10Accuracy=0.5244, over 2362.93 frames. ], batch size: 30, lr: 1.66e-02 +2024-08-06 07:37:39,815 INFO [trainer.py:765] (3/8) Epoch 5, batch 200, train_loss[loss=4.131, NarTop10Accuracy=0.4924, over 6820.00 frames. ], tot_loss[loss=3.931, NarTop10Accuracy=0.5282, over 3857.27 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (3/8) Epoch 5, batch 300, train_loss[loss=4.134, NarTop10Accuracy=0.4751, over 6995.00 frames. ], tot_loss[loss=3.923, NarTop10Accuracy=0.5295, over 4668.00 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (3/8) Epoch 5, batch 400, train_loss[loss=3.93, NarTop10Accuracy=0.5239, over 5173.00 frames. ], tot_loss[loss=3.916, NarTop10Accuracy=0.5311, over 5121.68 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (3/8) Epoch 5, batch 500, train_loss[loss=3.658, NarTop10Accuracy=0.5651, over 5968.00 frames. ], tot_loss[loss=3.924, NarTop10Accuracy=0.5296, over 5406.63 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (3/8) Epoch 5, batch 600, train_loss[loss=4.232, NarTop10Accuracy=0.4756, over 5854.00 frames. ], tot_loss[loss=3.915, NarTop10Accuracy=0.5318, over 5676.43 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (3/8) Epoch 5, batch 700, train_loss[loss=3.771, NarTop10Accuracy=0.5651, over 4893.00 frames. ], tot_loss[loss=3.914, NarTop10Accuracy=0.5323, over 5739.96 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,367 INFO [trainer.py:765] (3/8) Epoch 5, batch 800, train_loss[loss=4.037, NarTop10Accuracy=0.4974, over 5198.00 frames. ], tot_loss[loss=3.92, NarTop10Accuracy=0.5307, over 5786.18 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (3/8) Epoch 5, batch 900, train_loss[loss=4.148, NarTop10Accuracy=0.4877, over 6659.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5331, over 5805.57 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 07:42:13,846 INFO [trainer.py:765] (3/8) Epoch 5, batch 1000, train_loss[loss=4.07, NarTop10Accuracy=0.501, over 6308.00 frames. ], tot_loss[loss=3.888, NarTop10Accuracy=0.5364, over 5903.70 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (3/8) Epoch 5, batch 1100, train_loss[loss=3.947, NarTop10Accuracy=0.5181, over 6766.00 frames. ], tot_loss[loss=3.893, NarTop10Accuracy=0.5355, over 5928.90 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (3/8) Epoch 5, batch 1200, train_loss[loss=4.097, NarTop10Accuracy=0.4976, over 7610.00 frames. ], tot_loss[loss=3.896, NarTop10Accuracy=0.5349, over 5937.79 frames. ], batch size: 32, lr: 1.59e-02 +2024-08-06 07:44:00,557 INFO [trainer.py:765] (3/8) Epoch 5, batch 1300, train_loss[loss=3.757, NarTop10Accuracy=0.5617, over 4971.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5366, over 6009.77 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (3/8) Epoch 5, batch 1400, train_loss[loss=4.214, NarTop10Accuracy=0.4822, over 6235.00 frames. ], tot_loss[loss=3.894, NarTop10Accuracy=0.5355, over 6029.30 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (3/8) Epoch 5, batch 1500, train_loss[loss=3.952, NarTop10Accuracy=0.5304, over 6549.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5346, over 5961.36 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (3/8) Epoch 5, batch 1600, train_loss[loss=3.953, NarTop10Accuracy=0.5204, over 7150.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5346, over 5946.50 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,058 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (3/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27642MB +2024-08-06 07:46:02,123 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (3/8) Epoch 5, batch 1700, train_loss[loss=3.843, NarTop10Accuracy=0.5522, over 6246.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5357, over 5942.01 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (3/8) Epoch 5, batch 1800, train_loss[loss=3.916, NarTop10Accuracy=0.5258, over 7096.00 frames. ], tot_loss[loss=3.888, NarTop10Accuracy=0.5365, over 6003.62 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (3/8) Epoch 5, batch 1900, train_loss[loss=3.77, NarTop10Accuracy=0.5578, over 6153.00 frames. ], tot_loss[loss=3.902, NarTop10Accuracy=0.5341, over 6043.18 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (3/8) Epoch 5, batch 2000, train_loss[loss=3.915, NarTop10Accuracy=0.5343, over 6012.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5357, over 6021.32 frames. ], batch size: 48, lr: 1.55e-02 +2024-08-06 07:47:52,618 INFO [trainer.py:765] (3/8) Epoch 5, batch 2100, train_loss[loss=3.907, NarTop10Accuracy=0.5422, over 3871.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5344, over 5992.96 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (3/8) Epoch 5, batch 2200, train_loss[loss=3.896, NarTop10Accuracy=0.5351, over 7185.00 frames. ], tot_loss[loss=3.885, NarTop10Accuracy=0.5373, over 6038.56 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (3/8) Epoch 5, batch 2300, train_loss[loss=3.972, NarTop10Accuracy=0.5169, over 5743.00 frames. ], tot_loss[loss=3.885, NarTop10Accuracy=0.537, over 6075.87 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (3/8) Epoch 5, batch 2400, train_loss[loss=3.558, NarTop10Accuracy=0.5824, over 5014.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5368, over 5866.68 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (3/8) Epoch 5, batch 2500, train_loss[loss=3.627, NarTop10Accuracy=0.5685, over 5204.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5428, over 5518.69 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,460 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (3/8) Epoch 6, batch 100, train_loss[loss=3.621, NarTop10Accuracy=0.5917, over 7294.00 frames. ], tot_loss[loss=3.788, NarTop10Accuracy=0.5585, over 2351.45 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,788 INFO [trainer.py:765] (3/8) Epoch 6, batch 200, train_loss[loss=3.603, NarTop10Accuracy=0.5975, over 6808.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.5579, over 3846.93 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (3/8) Epoch 6, batch 300, train_loss[loss=3.73, NarTop10Accuracy=0.5697, over 7031.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5588, over 4666.69 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (3/8) Epoch 6, batch 400, train_loss[loss=4.106, NarTop10Accuracy=0.4988, over 5073.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5592, over 5114.07 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (3/8) Epoch 6, batch 500, train_loss[loss=3.954, NarTop10Accuracy=0.5249, over 6114.00 frames. ], tot_loss[loss=3.768, NarTop10Accuracy=0.562, over 5404.47 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (3/8) Epoch 6, batch 600, train_loss[loss=3.856, NarTop10Accuracy=0.544, over 5755.00 frames. ], tot_loss[loss=3.773, NarTop10Accuracy=0.561, over 5668.00 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,439 INFO [trainer.py:765] (3/8) Epoch 6, batch 700, train_loss[loss=3.756, NarTop10Accuracy=0.5614, over 5233.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5583, over 5751.99 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (3/8) Epoch 6, batch 800, train_loss[loss=3.567, NarTop10Accuracy=0.597, over 4933.00 frames. ], tot_loss[loss=3.79, NarTop10Accuracy=0.5574, over 5796.79 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (3/8) Epoch 6, batch 900, train_loss[loss=3.65, NarTop10Accuracy=0.585, over 6629.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.5585, over 5817.38 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (3/8) Epoch 6, batch 1000, train_loss[loss=3.725, NarTop10Accuracy=0.5662, over 6720.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5553, over 5919.83 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (3/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27642MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (3/8) Epoch 6, batch 1100, train_loss[loss=3.669, NarTop10Accuracy=0.5831, over 6999.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5549, over 5950.56 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (3/8) Epoch 6, batch 1200, train_loss[loss=3.937, NarTop10Accuracy=0.5209, over 7120.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5568, over 5927.94 frames. ], batch size: 30, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (3/8) Epoch 6, batch 1300, train_loss[loss=3.727, NarTop10Accuracy=0.551, over 5056.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5575, over 6002.53 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (3/8) Epoch 6, batch 1400, train_loss[loss=3.85, NarTop10Accuracy=0.5298, over 6116.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5563, over 6010.31 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,998 INFO [trainer.py:765] (3/8) Epoch 6, batch 1500, train_loss[loss=4.053, NarTop10Accuracy=0.5085, over 5817.00 frames. ], tot_loss[loss=3.812, NarTop10Accuracy=0.5526, over 5951.46 frames. 
], batch size: 48, lr: 1.36e-02 +2024-08-06 07:59:28,934 INFO [trainer.py:765] (3/8) Epoch 6, batch 1600, train_loss[loss=3.695, NarTop10Accuracy=0.582, over 7080.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.556, over 5949.91 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (3/8) Epoch 6, batch 1700, train_loss[loss=3.934, NarTop10Accuracy=0.5361, over 6231.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.5557, over 5926.86 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 08:00:22,188 INFO [trainer.py:765] (3/8) Epoch 6, batch 1800, train_loss[loss=3.712, NarTop10Accuracy=0.5666, over 7094.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.556, over 5982.92 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,794 INFO [trainer.py:765] (3/8) Epoch 6, batch 1900, train_loss[loss=4.124, NarTop10Accuracy=0.4847, over 6404.00 frames. ], tot_loss[loss=3.822, NarTop10Accuracy=0.5503, over 6032.76 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (3/8) Epoch 6, batch 2000, train_loss[loss=4.049, NarTop10Accuracy=0.5039, over 6220.00 frames. ], tot_loss[loss=3.806, NarTop10Accuracy=0.5538, over 6019.29 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 08:01:43,134 INFO [trainer.py:765] (3/8) Epoch 6, batch 2100, train_loss[loss=3.492, NarTop10Accuracy=0.6216, over 4685.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5543, over 5986.19 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (3/8) Epoch 6, batch 2200, train_loss[loss=3.736, NarTop10Accuracy=0.5707, over 7438.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5549, over 6025.50 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (3/8) Epoch 6, batch 2300, train_loss[loss=3.263, NarTop10Accuracy=0.655, over 5811.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5545, over 6053.32 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,617 INFO [trainer.py:765] (3/8) Epoch 6, batch 2400, train_loss[loss=3.837, NarTop10Accuracy=0.5413, over 5073.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5541, over 5871.70 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (3/8) Epoch 6, batch 2500, train_loss[loss=3.96, NarTop10Accuracy=0.5137, over 4996.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5582, over 5524.22 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:43,225 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:04:42,817 INFO [trainer.py:765] (3/8) Epoch 7, batch 100, train_loss[loss=3.903, NarTop10Accuracy=0.5429, over 7260.00 frames. ], tot_loss[loss=3.703, NarTop10Accuracy=0.5766, over 2380.41 frames. ], batch size: 30, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (3/8) Epoch 7, batch 200, train_loss[loss=3.792, NarTop10Accuracy=0.5664, over 6906.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.5755, over 3863.68 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (3/8) Epoch 7, batch 300, train_loss[loss=3.475, NarTop10Accuracy=0.6304, over 7249.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.5736, over 4677.78 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (3/8) Epoch 7, batch 400, train_loss[loss=4.054, NarTop10Accuracy=0.5072, over 5257.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.5729, over 5148.45 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,316 INFO [trainer.py:765] (3/8) Epoch 7, batch 500, train_loss[loss=3.601, NarTop10Accuracy=0.5884, over 6246.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5724, over 5419.73 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27657MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (3/8) Epoch 7, batch 600, train_loss[loss=3.466, NarTop10Accuracy=0.6305, over 5730.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5718, over 5682.15 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (3/8) Epoch 7, batch 700, train_loss[loss=3.809, NarTop10Accuracy=0.5679, over 4960.00 frames. ], tot_loss[loss=3.728, NarTop10Accuracy=0.5707, over 5760.24 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (3/8) Epoch 7, batch 800, train_loss[loss=3.501, NarTop10Accuracy=0.6096, over 4223.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5744, over 5780.99 frames. ], batch size: 5, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (3/8) Epoch 7, batch 900, train_loss[loss=3.589, NarTop10Accuracy=0.6069, over 6186.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.574, over 5793.78 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,192 INFO [trainer.py:765] (3/8) Epoch 7, batch 1000, train_loss[loss=3.702, NarTop10Accuracy=0.5719, over 6755.00 frames. ], tot_loss[loss=3.714, NarTop10Accuracy=0.5725, over 5908.98 frames. ], batch size: 14, lr: 1.20e-02 +2024-08-06 08:10:29,571 INFO [trainer.py:765] (3/8) Epoch 7, batch 1100, train_loss[loss=3.835, NarTop10Accuracy=0.5481, over 6980.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5723, over 5950.84 frames. ], batch size: 18, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (3/8) Epoch 7, batch 1200, train_loss[loss=4.133, NarTop10Accuracy=0.491, over 7098.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5721, over 5947.99 frames. ], batch size: 30, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (3/8) Epoch 7, batch 1300, train_loss[loss=3.164, NarTop10Accuracy=0.6632, over 5056.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5719, over 6015.64 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (3/8) Epoch 7, batch 1400, train_loss[loss=3.556, NarTop10Accuracy=0.5912, over 6169.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.571, over 6028.72 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (3/8) Epoch 7, batch 1500, train_loss[loss=3.721, NarTop10Accuracy=0.5728, over 6070.00 frames. ], tot_loss[loss=3.725, NarTop10Accuracy=0.5707, over 5976.78 frames. ], batch size: 49, lr: 1.19e-02 +2024-08-06 08:13:13,238 INFO [trainer.py:765] (3/8) Epoch 7, batch 1600, train_loss[loss=3.644, NarTop10Accuracy=0.5952, over 6875.00 frames. ], tot_loss[loss=3.727, NarTop10Accuracy=0.5704, over 5962.36 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (3/8) Epoch 7, batch 1700, train_loss[loss=3.94, NarTop10Accuracy=0.5388, over 6397.00 frames. ], tot_loss[loss=3.733, NarTop10Accuracy=0.5689, over 5952.51 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 08:14:06,584 INFO [trainer.py:765] (3/8) Epoch 7, batch 1800, train_loss[loss=3.758, NarTop10Accuracy=0.57, over 7165.00 frames. ], tot_loss[loss=3.742, NarTop10Accuracy=0.5676, over 5996.79 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (3/8) Epoch 7, batch 1900, train_loss[loss=4.123, NarTop10Accuracy=0.4955, over 5873.00 frames. ], tot_loss[loss=3.745, NarTop10Accuracy=0.567, over 6030.96 frames. ], batch size: 50, lr: 1.17e-02 +2024-08-06 08:14:58,995 INFO [trainer.py:765] (3/8) Epoch 7, batch 2000, train_loss[loss=3.805, NarTop10Accuracy=0.5591, over 6454.00 frames. ], tot_loss[loss=3.729, NarTop10Accuracy=0.5702, over 6020.57 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (3/8) Epoch 7, batch 2100, train_loss[loss=3.933, NarTop10Accuracy=0.5273, over 4054.00 frames. ], tot_loss[loss=3.731, NarTop10Accuracy=0.5695, over 6001.48 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (3/8) Epoch 7, batch 2200, train_loss[loss=3.94, NarTop10Accuracy=0.5247, over 7210.00 frames. ], tot_loss[loss=3.74, NarTop10Accuracy=0.5677, over 6041.33 frames. ], batch size: 30, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (3/8) Epoch 7, batch 2300, train_loss[loss=3.761, NarTop10Accuracy=0.5546, over 5777.00 frames. ], tot_loss[loss=3.738, NarTop10Accuracy=0.568, over 6071.63 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (3/8) Epoch 7, batch 2400, train_loss[loss=3.561, NarTop10Accuracy=0.6048, over 5202.00 frames. ], tot_loss[loss=3.743, NarTop10Accuracy=0.5675, over 5870.41 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (3/8) Epoch 7, batch 2500, train_loss[loss=3.725, NarTop10Accuracy=0.5778, over 5073.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5721, over 5529.33 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,843 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (3/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27657MB +2024-08-06 08:17:17,901 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,331 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:18:36,194 INFO [trainer.py:765] (3/8) Epoch 8, batch 100, train_loss[loss=3.653, NarTop10Accuracy=0.5878, over 7234.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5839, over 2361.51 frames. ], batch size: 30, lr: 1.09e-02 +2024-08-06 08:19:15,019 INFO [trainer.py:765] (3/8) Epoch 8, batch 200, train_loss[loss=3.402, NarTop10Accuracy=0.6265, over 6913.00 frames. ], tot_loss[loss=3.654, NarTop10Accuracy=0.5855, over 3862.93 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (3/8) Epoch 8, batch 300, train_loss[loss=3.708, NarTop10Accuracy=0.5684, over 7171.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5847, over 4666.33 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,268 INFO [trainer.py:765] (3/8) Epoch 8, batch 400, train_loss[loss=3.538, NarTop10Accuracy=0.615, over 5154.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5848, over 5121.96 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,421 INFO [trainer.py:765] (3/8) Epoch 8, batch 500, train_loss[loss=3.454, NarTop10Accuracy=0.6351, over 6312.00 frames. ], tot_loss[loss=3.65, NarTop10Accuracy=0.5868, over 5407.77 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (3/8) Epoch 8, batch 600, train_loss[loss=3.782, NarTop10Accuracy=0.5526, over 5789.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.583, over 5666.38 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (3/8) Epoch 8, batch 700, train_loss[loss=3.751, NarTop10Accuracy=0.5622, over 5082.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5838, over 5735.68 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,341 INFO [trainer.py:765] (3/8) Epoch 8, batch 800, train_loss[loss=3.53, NarTop10Accuracy=0.6135, over 5047.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5825, over 5788.89 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (3/8) Epoch 8, batch 900, train_loss[loss=3.484, NarTop10Accuracy=0.6236, over 6833.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5836, over 5792.67 frames. ], batch size: 14, lr: 1.07e-02 +2024-08-06 08:23:42,943 INFO [trainer.py:765] (3/8) Epoch 8, batch 1000, train_loss[loss=3.451, NarTop10Accuracy=0.6182, over 6727.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5846, over 5902.80 frames. ], batch size: 14, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (3/8) Epoch 8, batch 1100, train_loss[loss=3.633, NarTop10Accuracy=0.5923, over 6754.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5819, over 5953.84 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,339 INFO [trainer.py:765] (3/8) Epoch 8, batch 1200, train_loss[loss=3.468, NarTop10Accuracy=0.6268, over 7285.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.5803, over 5959.35 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 08:25:26,604 INFO [trainer.py:765] (3/8) Epoch 8, batch 1300, train_loss[loss=3.972, NarTop10Accuracy=0.5256, over 5271.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5823, over 6021.17 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,605 INFO [trainer.py:765] (3/8) Epoch 8, batch 1400, train_loss[loss=3.699, NarTop10Accuracy=0.5744, over 6057.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5799, over 6034.93 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (3/8) Epoch 8, batch 1500, train_loss[loss=3.626, NarTop10Accuracy=0.5867, over 6078.00 frames. ], tot_loss[loss=3.667, NarTop10Accuracy=0.5823, over 5974.29 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (3/8) Epoch 8, batch 1600, train_loss[loss=3.903, NarTop10Accuracy=0.5322, over 7028.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5818, over 5950.57 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (3/8) Epoch 8, batch 1700, train_loss[loss=3.609, NarTop10Accuracy=0.5905, over 6240.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5798, over 5937.72 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,461 INFO [trainer.py:765] (3/8) Epoch 8, batch 1800, train_loss[loss=3.781, NarTop10Accuracy=0.5602, over 7233.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5803, over 6010.01 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,180 INFO [trainer.py:765] (3/8) Epoch 8, batch 1900, train_loss[loss=4.05, NarTop10Accuracy=0.5041, over 6193.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5803, over 6053.37 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 08:28:25,164 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (3/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27657MB +2024-08-06 08:28:35,795 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,984 INFO [trainer.py:765] (3/8) Epoch 8, batch 2000, train_loss[loss=3.809, NarTop10Accuracy=0.5551, over 6147.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5809, over 6040.43 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:29:18,486 INFO [trainer.py:765] (3/8) Epoch 8, batch 2100, train_loss[loss=3.242, NarTop10Accuracy=0.6605, over 4783.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5836, over 6027.62 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 08:29:43,791 INFO [trainer.py:765] (3/8) Epoch 8, batch 2200, train_loss[loss=4.009, NarTop10Accuracy=0.5151, over 7382.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5824, over 6042.00 frames. ], batch size: 31, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (3/8) Epoch 8, batch 2300, train_loss[loss=3.671, NarTop10Accuracy=0.5811, over 5759.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.5802, over 6082.78 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,791 INFO [trainer.py:765] (3/8) Epoch 8, batch 2400, train_loss[loss=3.761, NarTop10Accuracy=0.5665, over 5190.00 frames. ], tot_loss[loss=3.691, NarTop10Accuracy=0.5777, over 5890.66 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (3/8) Epoch 8, batch 2500, train_loss[loss=3.697, NarTop10Accuracy=0.5898, over 5007.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5822, over 5550.60 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,600 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (3/8) Epoch 9, batch 100, train_loss[loss=3.877, NarTop10Accuracy=0.5435, over 7065.00 frames. ], tot_loss[loss=3.608, NarTop10Accuracy=0.5958, over 2367.52 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (3/8) Epoch 9, batch 200, train_loss[loss=3.587, NarTop10Accuracy=0.6019, over 6776.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5975, over 3852.01 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (3/8) Epoch 9, batch 300, train_loss[loss=3.731, NarTop10Accuracy=0.5795, over 7036.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5979, over 4670.84 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,963 INFO [trainer.py:765] (3/8) Epoch 9, batch 400, train_loss[loss=3.632, NarTop10Accuracy=0.5785, over 5207.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6001, over 5134.26 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (3/8) Epoch 9, batch 500, train_loss[loss=3.74, NarTop10Accuracy=0.561, over 6124.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6023, over 5405.61 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (3/8) Epoch 9, batch 600, train_loss[loss=3.282, NarTop10Accuracy=0.6554, over 5855.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6002, over 5683.78 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,823 INFO [trainer.py:765] (3/8) Epoch 9, batch 700, train_loss[loss=3.957, NarTop10Accuracy=0.5163, over 5150.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.598, over 5762.81 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (3/8) Epoch 9, batch 800, train_loss[loss=3.187, NarTop10Accuracy=0.6662, over 5041.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5944, over 5791.85 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,454 INFO [trainer.py:765] (3/8) Epoch 9, batch 900, train_loss[loss=3.406, NarTop10Accuracy=0.6392, over 6179.00 frames. ], tot_loss[loss=3.624, NarTop10Accuracy=0.5917, over 5841.08 frames. ], batch size: 13, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (3/8) Epoch 9, batch 1000, train_loss[loss=3.495, NarTop10Accuracy=0.613, over 6292.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.593, over 5939.49 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,420 INFO [trainer.py:765] (3/8) Epoch 9, batch 1100, train_loss[loss=3.988, NarTop10Accuracy=0.5207, over 6922.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5893, over 5972.93 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (3/8) Epoch 9, batch 1200, train_loss[loss=3.655, NarTop10Accuracy=0.5927, over 7424.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5874, over 5965.87 frames. ], batch size: 31, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (3/8) Epoch 9, batch 1300, train_loss[loss=3.654, NarTop10Accuracy=0.5816, over 4973.00 frames. ], tot_loss[loss=3.648, NarTop10Accuracy=0.5864, over 6037.04 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,116 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (3/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 27657MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (3/8) Epoch 9, batch 1400, train_loss[loss=3.365, NarTop10Accuracy=0.6292, over 6010.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5901, over 6044.98 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (3/8) Epoch 9, batch 1500, train_loss[loss=3.907, NarTop10Accuracy=0.5338, over 5846.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5894, over 5975.47 frames. ], batch size: 49, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (3/8) Epoch 9, batch 1600, train_loss[loss=3.641, NarTop10Accuracy=0.5885, over 7300.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5899, over 5961.78 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (3/8) Epoch 9, batch 1700, train_loss[loss=3.649, NarTop10Accuracy=0.5894, over 6300.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.5886, over 5958.51 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (3/8) Epoch 9, batch 1800, train_loss[loss=3.913, NarTop10Accuracy=0.5287, over 7265.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5894, over 6013.58 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,495 INFO [trainer.py:765] (3/8) Epoch 9, batch 1900, train_loss[loss=3.659, NarTop10Accuracy=0.5931, over 6318.00 frames. ], tot_loss[loss=3.635, NarTop10Accuracy=0.5899, over 6050.35 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (3/8) Epoch 9, batch 2000, train_loss[loss=4.116, NarTop10Accuracy=0.4901, over 6297.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5906, over 6046.47 frames. ], batch size: 49, lr: 9.31e-03 +2024-08-06 08:43:01,668 INFO [trainer.py:765] (3/8) Epoch 9, batch 2100, train_loss[loss=3.44, NarTop10Accuracy=0.6389, over 3888.00 frames. ], tot_loss[loss=3.621, NarTop10Accuracy=0.5919, over 6019.08 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (3/8) Epoch 9, batch 2200, train_loss[loss=3.576, NarTop10Accuracy=0.5978, over 7114.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.59, over 6049.67 frames. ], batch size: 30, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (3/8) Epoch 9, batch 2300, train_loss[loss=3.599, NarTop10Accuracy=0.6025, over 5776.00 frames. ], tot_loss[loss=3.652, NarTop10Accuracy=0.5859, over 6063.32 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (3/8) Epoch 9, batch 2400, train_loss[loss=3.328, NarTop10Accuracy=0.6483, over 5217.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5849, over 5863.29 frames. ], batch size: 7, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (3/8) Epoch 9, batch 2500, train_loss[loss=3.801, NarTop10Accuracy=0.5489, over 4897.00 frames. ], tot_loss[loss=3.635, NarTop10Accuracy=0.5893, over 5526.96 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:05,316 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 08:46:09,065 INFO [trainer.py:765] (3/8) Epoch 10, batch 100, train_loss[loss=3.305, NarTop10Accuracy=0.6521, over 7398.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6016, over 2373.22 frames. ], batch size: 32, lr: 8.75e-03 +2024-08-06 08:46:44,074 INFO [trainer.py:765] (3/8) Epoch 10, batch 200, train_loss[loss=3.671, NarTop10Accuracy=0.5843, over 6916.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6018, over 3879.02 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (3/8) Epoch 10, batch 300, train_loss[loss=3.516, NarTop10Accuracy=0.6229, over 7191.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6039, over 4688.14 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,120 INFO [trainer.py:765] (3/8) Epoch 10, batch 400, train_loss[loss=3.809, NarTop10Accuracy=0.5492, over 5153.00 frames. ], tot_loss[loss=3.569, NarTop10Accuracy=0.6031, over 5129.11 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,371 INFO [trainer.py:765] (3/8) Epoch 10, batch 500, train_loss[loss=3.383, NarTop10Accuracy=0.6439, over 6176.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6035, over 5407.39 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,460 INFO [trainer.py:765] (3/8) Epoch 10, batch 600, train_loss[loss=3.327, NarTop10Accuracy=0.6573, over 5722.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6013, over 5663.97 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (3/8) Epoch 10, batch 700, train_loss[loss=3.36, NarTop10Accuracy=0.6403, over 5120.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6, over 5723.90 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,164 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (3/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 08:50:01,725 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (3/8) Epoch 10, batch 800, train_loss[loss=3.425, NarTop10Accuracy=0.627, over 5103.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5991, over 5794.06 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,890 INFO [trainer.py:765] (3/8) Epoch 10, batch 900, train_loss[loss=3.385, NarTop10Accuracy=0.6438, over 6185.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6025, over 5804.87 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (3/8) Epoch 10, batch 1000, train_loss[loss=3.829, NarTop10Accuracy=0.549, over 6676.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.5993, over 5905.50 frames. ], batch size: 14, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (3/8) Epoch 10, batch 1100, train_loss[loss=3.641, NarTop10Accuracy=0.5889, over 6881.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5972, over 5955.59 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (3/8) Epoch 10, batch 1200, train_loss[loss=3.656, NarTop10Accuracy=0.588, over 7149.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.598, over 5956.91 frames. ], batch size: 30, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (3/8) Epoch 10, batch 1300, train_loss[loss=3.542, NarTop10Accuracy=0.5903, over 5031.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5987, over 6008.69 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,881 INFO [trainer.py:765] (3/8) Epoch 10, batch 1400, train_loss[loss=3.533, NarTop10Accuracy=0.6114, over 6107.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5964, over 6029.95 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (3/8) Epoch 10, batch 1500, train_loss[loss=3.558, NarTop10Accuracy=0.6059, over 6236.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5989, over 5972.29 frames. ], batch size: 49, lr: 8.51e-03 +2024-08-06 08:54:45,526 INFO [trainer.py:765] (3/8) Epoch 10, batch 1600, train_loss[loss=3.612, NarTop10Accuracy=0.6038, over 7255.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5975, over 5944.45 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,302 INFO [trainer.py:765] (3/8) Epoch 10, batch 1700, train_loss[loss=3.423, NarTop10Accuracy=0.6321, over 6163.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5958, over 5939.41 frames. 
], batch size: 13, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (3/8) Epoch 10, batch 1800, train_loss[loss=3.582, NarTop10Accuracy=0.604, over 7146.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5971, over 5990.52 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (3/8) Epoch 10, batch 1900, train_loss[loss=4.017, NarTop10Accuracy=0.5107, over 5918.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5976, over 6030.76 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,288 INFO [trainer.py:765] (3/8) Epoch 10, batch 2000, train_loss[loss=3.744, NarTop10Accuracy=0.5715, over 6436.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5971, over 5998.41 frames. ], batch size: 49, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (3/8) Epoch 10, batch 2100, train_loss[loss=3.361, NarTop10Accuracy=0.6529, over 3813.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5955, over 5992.60 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (3/8) Epoch 10, batch 2200, train_loss[loss=3.744, NarTop10Accuracy=0.575, over 7210.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5955, over 6024.21 frames. ], batch size: 30, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (3/8) Epoch 10, batch 2300, train_loss[loss=3.266, NarTop10Accuracy=0.6616, over 5775.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5949, over 6069.11 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,346 INFO [trainer.py:765] (3/8) Epoch 10, batch 2400, train_loss[loss=3.421, NarTop10Accuracy=0.6383, over 5108.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5945, over 5863.83 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,809 INFO [trainer.py:765] (3/8) Epoch 10, batch 2500, train_loss[loss=3.224, NarTop10Accuracy=0.6486, over 4329.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5953, over 5510.61 frames. ], batch size: 5, lr: 8.35e-03 +2024-08-06 08:59:00,050 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:00:03,681 INFO [trainer.py:765] (3/8) Epoch 11, batch 100, train_loss[loss=3.414, NarTop10Accuracy=0.6377, over 7185.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6121, over 2360.52 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (3/8) Epoch 11, batch 200, train_loss[loss=3.839, NarTop10Accuracy=0.5508, over 6860.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6131, over 3845.99 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (3/8) Epoch 11, batch 300, train_loss[loss=3.405, NarTop10Accuracy=0.6396, over 7150.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6115, over 4658.39 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,535 INFO [trainer.py:765] (3/8) Epoch 11, batch 400, train_loss[loss=3.396, NarTop10Accuracy=0.6398, over 5798.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6114, over 5135.56 frames. 
], batch size: 8, lr: 7.91e-03 +2024-08-06 09:02:21,239 INFO [trainer.py:765] (3/8) Epoch 11, batch 500, train_loss[loss=3.322, NarTop10Accuracy=0.6566, over 6188.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.611, over 5424.55 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,743 INFO [trainer.py:765] (3/8) Epoch 11, batch 600, train_loss[loss=3.657, NarTop10Accuracy=0.5852, over 5782.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6095, over 5680.79 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,237 INFO [trainer.py:765] (3/8) Epoch 11, batch 700, train_loss[loss=3.445, NarTop10Accuracy=0.62, over 5068.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6093, over 5736.27 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (3/8) Epoch 11, batch 800, train_loss[loss=3.306, NarTop10Accuracy=0.6485, over 5034.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6046, over 5803.09 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,084 INFO [trainer.py:765] (3/8) Epoch 11, batch 900, train_loss[loss=3.662, NarTop10Accuracy=0.5839, over 6388.00 frames. ], tot_loss[loss=3.553, NarTop10Accuracy=0.6063, over 5810.22 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 09:05:27,013 INFO [trainer.py:765] (3/8) Epoch 11, batch 1000, train_loss[loss=3.523, NarTop10Accuracy=0.6132, over 6195.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6049, over 5930.02 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,351 INFO [trainer.py:765] (3/8) Epoch 11, batch 1100, train_loss[loss=3.684, NarTop10Accuracy=0.5853, over 6761.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6025, over 5956.41 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,946 INFO [trainer.py:765] (3/8) Epoch 11, batch 1200, train_loss[loss=3.606, NarTop10Accuracy=0.5924, over 7381.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6022, over 5958.91 frames. ], batch size: 31, lr: 7.80e-03 +2024-08-06 09:07:15,494 INFO [trainer.py:765] (3/8) Epoch 11, batch 1300, train_loss[loss=3.09, NarTop10Accuracy=0.6751, over 5000.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6033, over 6024.85 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,629 INFO [trainer.py:765] (3/8) Epoch 11, batch 1400, train_loss[loss=3.597, NarTop10Accuracy=0.6055, over 6045.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6017, over 6038.88 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,988 INFO [trainer.py:765] (3/8) Epoch 11, batch 1500, train_loss[loss=3.547, NarTop10Accuracy=0.6197, over 6175.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6015, over 5973.92 frames. ], batch size: 50, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (3/8) Epoch 11, batch 1600, train_loss[loss=3.47, NarTop10Accuracy=0.6244, over 7009.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6014, over 5949.02 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (3/8) Epoch 11, batch 1700, train_loss[loss=3.717, NarTop10Accuracy=0.5748, over 6245.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.602, over 5940.08 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,733 INFO [trainer.py:765] (3/8) Epoch 11, batch 1800, train_loss[loss=3.593, NarTop10Accuracy=0.5987, over 7087.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6, over 5999.21 frames. 
], batch size: 21, lr: 7.72e-03 +2024-08-06 09:10:07,342 INFO [trainer.py:765] (3/8) Epoch 11, batch 1900, train_loss[loss=3.869, NarTop10Accuracy=0.5426, over 5865.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5979, over 6036.20 frames. ], batch size: 48, lr: 7.70e-03 +2024-08-06 09:10:33,040 INFO [trainer.py:765] (3/8) Epoch 11, batch 2000, train_loss[loss=3.534, NarTop10Accuracy=0.6122, over 6032.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6006, over 6020.50 frames. ], batch size: 51, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (3/8) Epoch 11, batch 2100, train_loss[loss=3.585, NarTop10Accuracy=0.5985, over 4781.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6037, over 6004.14 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (3/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:11:31,929 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,518 INFO [trainer.py:765] (3/8) Epoch 11, batch 2200, train_loss[loss=3.68, NarTop10Accuracy=0.5784, over 7548.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.602, over 6040.55 frames. ], batch size: 31, lr: 7.66e-03 +2024-08-06 09:11:59,939 INFO [trainer.py:765] (3/8) Epoch 11, batch 2300, train_loss[loss=3.508, NarTop10Accuracy=0.6175, over 5834.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5989, over 6076.10 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,695 INFO [trainer.py:765] (3/8) Epoch 11, batch 2400, train_loss[loss=3.746, NarTop10Accuracy=0.5668, over 5148.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5951, over 5875.30 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,878 INFO [trainer.py:765] (3/8) Epoch 11, batch 2500, train_loss[loss=3.835, NarTop10Accuracy=0.5475, over 4992.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5992, over 5541.33 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:08,826 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:14:12,279 INFO [trainer.py:765] (3/8) Epoch 12, batch 100, train_loss[loss=3.389, NarTop10Accuracy=0.6419, over 7252.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6135, over 2367.65 frames. ], batch size: 30, lr: 7.29e-03 +2024-08-06 09:14:48,096 INFO [trainer.py:765] (3/8) Epoch 12, batch 200, train_loss[loss=3.209, NarTop10Accuracy=0.6784, over 6335.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6155, over 3842.29 frames. ], batch size: 16, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (3/8) Epoch 12, batch 300, train_loss[loss=3.412, NarTop10Accuracy=0.6472, over 7166.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6185, over 4655.78 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (3/8) Epoch 12, batch 400, train_loss[loss=3.398, NarTop10Accuracy=0.6427, over 5149.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6157, over 5110.40 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (3/8) Epoch 12, batch 500, train_loss[loss=3.557, NarTop10Accuracy=0.5995, over 6063.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6147, over 5390.54 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (3/8) Epoch 12, batch 600, train_loss[loss=3.439, NarTop10Accuracy=0.6382, over 5814.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6126, over 5679.52 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,319 INFO [trainer.py:765] (3/8) Epoch 12, batch 700, train_loss[loss=3.35, NarTop10Accuracy=0.6571, over 4995.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6149, over 5746.02 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,753 INFO [trainer.py:765] (3/8) Epoch 12, batch 800, train_loss[loss=3.349, NarTop10Accuracy=0.6419, over 5100.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6134, over 5802.76 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (3/8) Epoch 12, batch 900, train_loss[loss=3.691, NarTop10Accuracy=0.5761, over 6291.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6116, over 5832.46 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (3/8) Epoch 12, batch 1000, train_loss[loss=3.545, NarTop10Accuracy=0.613, over 6309.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6109, over 5941.91 frames. ], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,427 INFO [trainer.py:765] (3/8) Epoch 12, batch 1100, train_loss[loss=3.766, NarTop10Accuracy=0.5665, over 6773.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.611, over 5940.18 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,443 INFO [trainer.py:765] (3/8) Epoch 12, batch 1200, train_loss[loss=3.481, NarTop10Accuracy=0.633, over 7400.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6095, over 5943.80 frames. ], batch size: 31, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (3/8) Epoch 12, batch 1300, train_loss[loss=3.651, NarTop10Accuracy=0.5869, over 4925.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6077, over 6014.68 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (3/8) Epoch 12, batch 1400, train_loss[loss=3.35, NarTop10Accuracy=0.6485, over 6000.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6055, over 6037.85 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,920 INFO [trainer.py:765] (3/8) Epoch 12, batch 1500, train_loss[loss=3.635, NarTop10Accuracy=0.5956, over 6232.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6057, over 5991.58 frames. ], batch size: 49, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (3/8) Epoch 12, batch 1600, train_loss[loss=3.627, NarTop10Accuracy=0.5908, over 7051.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6048, over 5943.65 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,860 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (3/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,785 INFO [trainer.py:765] (3/8) Epoch 12, batch 1700, train_loss[loss=3.462, NarTop10Accuracy=0.6206, over 6914.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6057, over 5942.57 frames. 
], batch size: 14, lr: 7.10e-03 +2024-08-06 09:23:41,386 INFO [trainer.py:765] (3/8) Epoch 12, batch 1800, train_loss[loss=3.429, NarTop10Accuracy=0.6364, over 7046.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6065, over 6000.47 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (3/8) Epoch 12, batch 1900, train_loss[loss=3.602, NarTop10Accuracy=0.5982, over 6121.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6043, over 6034.73 frames. ], batch size: 48, lr: 7.08e-03 +2024-08-06 09:24:33,618 INFO [trainer.py:765] (3/8) Epoch 12, batch 2000, train_loss[loss=3.581, NarTop10Accuracy=0.6006, over 6094.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6043, over 6021.83 frames. ], batch size: 49, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (3/8) Epoch 12, batch 2100, train_loss[loss=3.416, NarTop10Accuracy=0.6226, over 4803.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6068, over 6000.46 frames. ], batch size: 5, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (3/8) Epoch 12, batch 2200, train_loss[loss=3.41, NarTop10Accuracy=0.6314, over 7174.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6055, over 6048.57 frames. ], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (3/8) Epoch 12, batch 2300, train_loss[loss=3.923, NarTop10Accuracy=0.5356, over 5688.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6044, over 6047.48 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (3/8) Epoch 12, batch 2400, train_loss[loss=3.48, NarTop10Accuracy=0.6309, over 5230.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6042, over 5880.14 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,154 INFO [trainer.py:765] (3/8) Epoch 12, batch 2500, train_loss[loss=3.607, NarTop10Accuracy=0.5919, over 4991.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6066, over 5549.46 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,508 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (3/8) Epoch 13, batch 100, train_loss[loss=3.444, NarTop10Accuracy=0.63, over 7113.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6112, over 2378.13 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (3/8) Epoch 13, batch 200, train_loss[loss=3.278, NarTop10Accuracy=0.6647, over 6952.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6159, over 3871.25 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (3/8) Epoch 13, batch 300, train_loss[loss=3.323, NarTop10Accuracy=0.6568, over 6882.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.618, over 4673.81 frames. ], batch size: 21, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (3/8) Epoch 13, batch 400, train_loss[loss=3.176, NarTop10Accuracy=0.6774, over 5134.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6209, over 5126.86 frames. ], batch size: 7, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (3/8) Epoch 13, batch 500, train_loss[loss=3.741, NarTop10Accuracy=0.5651, over 6260.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6196, over 5401.09 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (3/8) Epoch 13, batch 600, train_loss[loss=3.452, NarTop10Accuracy=0.6376, over 5877.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6177, over 5688.18 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (3/8) Epoch 13, batch 700, train_loss[loss=3.589, NarTop10Accuracy=0.5951, over 5257.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6158, over 5758.87 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (3/8) Epoch 13, batch 800, train_loss[loss=3.475, NarTop10Accuracy=0.6221, over 5248.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6155, over 5803.20 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (3/8) Epoch 13, batch 900, train_loss[loss=3.471, NarTop10Accuracy=0.6253, over 6364.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6149, over 5809.29 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (3/8) Epoch 13, batch 1000, train_loss[loss=3.655, NarTop10Accuracy=0.5832, over 6333.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6132, over 5920.48 frames. ], batch size: 13, lr: 6.63e-03 +2024-08-06 09:33:14,219 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (3/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,526 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:33:25,133 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,715 INFO [trainer.py:765] (3/8) Epoch 13, batch 1100, train_loss[loss=3.661, NarTop10Accuracy=0.5783, over 6848.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6086, over 5943.44 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,486 INFO [trainer.py:765] (3/8) Epoch 13, batch 1200, train_loss[loss=3.456, NarTop10Accuracy=0.6223, over 7235.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6118, over 5946.47 frames. ], batch size: 30, lr: 6.61e-03 +2024-08-06 09:35:05,085 INFO [trainer.py:765] (3/8) Epoch 13, batch 1300, train_loss[loss=3.462, NarTop10Accuracy=0.6353, over 4163.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.611, over 6002.23 frames. ], batch size: 5, lr: 6.60e-03 +2024-08-06 09:35:36,404 INFO [trainer.py:765] (3/8) Epoch 13, batch 1400, train_loss[loss=3.643, NarTop10Accuracy=0.5972, over 6092.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6082, over 6035.19 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (3/8) Epoch 13, batch 1500, train_loss[loss=3.769, NarTop10Accuracy=0.5721, over 6199.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6052, over 5974.27 frames. ], batch size: 49, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (3/8) Epoch 13, batch 1600, train_loss[loss=3.506, NarTop10Accuracy=0.6123, over 7117.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6061, over 5963.00 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (3/8) Epoch 13, batch 1700, train_loss[loss=3.656, NarTop10Accuracy=0.5845, over 6241.00 frames. ], tot_loss[loss=3.55, NarTop10Accuracy=0.6068, over 5946.64 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (3/8) Epoch 13, batch 1800, train_loss[loss=3.427, NarTop10Accuracy=0.6302, over 7116.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6089, over 6015.57 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (3/8) Epoch 13, batch 1900, train_loss[loss=3.513, NarTop10Accuracy=0.6151, over 6165.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6066, over 6043.13 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 09:38:21,123 INFO [trainer.py:765] (3/8) Epoch 13, batch 2000, train_loss[loss=3.631, NarTop10Accuracy=0.5854, over 5745.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6081, over 6019.06 frames. ], batch size: 51, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (3/8) Epoch 13, batch 2100, train_loss[loss=3.397, NarTop10Accuracy=0.6347, over 3922.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6095, over 5997.26 frames. ], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (3/8) Epoch 13, batch 2200, train_loss[loss=3.62, NarTop10Accuracy=0.5845, over 7168.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6093, over 6033.47 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (3/8) Epoch 13, batch 2300, train_loss[loss=3.384, NarTop10Accuracy=0.6491, over 5773.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6088, over 6061.58 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (3/8) Epoch 13, batch 2400, train_loss[loss=3.547, NarTop10Accuracy=0.6033, over 5230.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6066, over 5880.50 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (3/8) Epoch 13, batch 2500, train_loss[loss=3.354, NarTop10Accuracy=0.6474, over 4980.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6114, over 5537.42 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:49,969 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (3/8) Epoch 14, batch 100, train_loss[loss=3.364, NarTop10Accuracy=0.639, over 7407.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6195, over 2356.39 frames. ], batch size: 31, lr: 6.24e-03 +2024-08-06 09:42:22,938 INFO [trainer.py:765] (3/8) Epoch 14, batch 200, train_loss[loss=3.587, NarTop10Accuracy=0.5993, over 6859.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6233, over 3861.42 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,415 INFO [trainer.py:765] (3/8) Epoch 14, batch 300, train_loss[loss=3.774, NarTop10Accuracy=0.5629, over 7363.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6182, over 4687.54 frames. ], batch size: 23, lr: 6.22e-03 +2024-08-06 09:43:30,440 INFO [trainer.py:765] (3/8) Epoch 14, batch 400, train_loss[loss=3.169, NarTop10Accuracy=0.6706, over 5130.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6203, over 5131.72 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (3/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,651 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:43:54,211 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (3/8) Epoch 14, batch 500, train_loss[loss=3.577, NarTop10Accuracy=0.6075, over 6032.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6194, over 5410.34 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (3/8) Epoch 14, batch 600, train_loss[loss=3.558, NarTop10Accuracy=0.6105, over 5714.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6195, over 5678.45 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,804 INFO [trainer.py:765] (3/8) Epoch 14, batch 700, train_loss[loss=3.612, NarTop10Accuracy=0.5999, over 5114.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6209, over 5739.14 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (3/8) Epoch 14, batch 800, train_loss[loss=3.687, NarTop10Accuracy=0.5833, over 5017.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6201, over 5807.18 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (3/8) Epoch 14, batch 900, train_loss[loss=3.677, NarTop10Accuracy=0.5854, over 6598.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.62, over 5834.58 frames. ], batch size: 14, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (3/8) Epoch 14, batch 1000, train_loss[loss=3.337, NarTop10Accuracy=0.654, over 6251.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6186, over 5932.37 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (3/8) Epoch 14, batch 1100, train_loss[loss=3.207, NarTop10Accuracy=0.6682, over 6930.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6177, over 5968.20 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (3/8) Epoch 14, batch 1200, train_loss[loss=3.4, NarTop10Accuracy=0.6397, over 7365.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6176, over 5949.70 frames. ], batch size: 31, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (3/8) Epoch 14, batch 1300, train_loss[loss=3.227, NarTop10Accuracy=0.656, over 5231.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6187, over 6015.78 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (3/8) Epoch 14, batch 1400, train_loss[loss=3.328, NarTop10Accuracy=0.6449, over 6171.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6148, over 6023.52 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (3/8) Epoch 14, batch 1500, train_loss[loss=3.654, NarTop10Accuracy=0.5881, over 6385.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6141, over 5963.31 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (3/8) Epoch 14, batch 1600, train_loss[loss=3.41, NarTop10Accuracy=0.6375, over 7284.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.615, over 5953.63 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,378 INFO [trainer.py:765] (3/8) Epoch 14, batch 1700, train_loss[loss=3.16, NarTop10Accuracy=0.6842, over 6106.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6163, over 5956.05 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (3/8) Epoch 14, batch 1800, train_loss[loss=3.394, NarTop10Accuracy=0.6318, over 7261.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6182, over 6003.81 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (3/8) Epoch 14, batch 1900, train_loss[loss=3.742, NarTop10Accuracy=0.5678, over 6591.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6137, over 6034.11 frames. 
], batch size: 50, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (3/8) Epoch 14, batch 2000, train_loss[loss=3.521, NarTop10Accuracy=0.6076, over 6399.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6128, over 6011.84 frames. ], batch size: 50, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (3/8) Epoch 14, batch 2100, train_loss[loss=3.681, NarTop10Accuracy=0.5655, over 3901.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6146, over 5998.03 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (3/8) Epoch 14, batch 2200, train_loss[loss=3.371, NarTop10Accuracy=0.6459, over 7374.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.6129, over 6032.19 frames. ], batch size: 31, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (3/8) Epoch 14, batch 2300, train_loss[loss=3.632, NarTop10Accuracy=0.5924, over 5878.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6091, over 6070.98 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (3/8) Epoch 14, batch 2400, train_loss[loss=3.607, NarTop10Accuracy=0.6059, over 5098.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6091, over 5868.68 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (3/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,620 INFO [trainer.py:765] (3/8) Epoch 14, batch 2500, train_loss[loss=3.903, NarTop10Accuracy=0.5445, over 4181.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6139, over 5517.81 frames. ], batch size: 5, lr: 6.03e-03 +2024-08-06 09:54:58,746 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (3/8) Epoch 15, batch 100, train_loss[loss=3.419, NarTop10Accuracy=0.6411, over 7264.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6285, over 2382.61 frames. ], batch size: 30, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (3/8) Epoch 15, batch 200, train_loss[loss=3.428, NarTop10Accuracy=0.6381, over 6960.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6292, over 3884.67 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,653 INFO [trainer.py:765] (3/8) Epoch 15, batch 300, train_loss[loss=3.36, NarTop10Accuracy=0.6449, over 7292.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6295, over 4666.49 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,464 INFO [trainer.py:765] (3/8) Epoch 15, batch 400, train_loss[loss=3.458, NarTop10Accuracy=0.6359, over 5066.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6269, over 5116.96 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (3/8) Epoch 15, batch 500, train_loss[loss=3.511, NarTop10Accuracy=0.6168, over 6122.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6259, over 5399.14 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (3/8) Epoch 15, batch 600, train_loss[loss=3.253, NarTop10Accuracy=0.6544, over 5875.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6238, over 5675.32 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,062 INFO [trainer.py:765] (3/8) Epoch 15, batch 700, train_loss[loss=3.337, NarTop10Accuracy=0.6349, over 5026.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6223, over 5731.11 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (3/8) Epoch 15, batch 800, train_loss[loss=3.489, NarTop10Accuracy=0.6122, over 5148.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6218, over 5797.14 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (3/8) Epoch 15, batch 900, train_loss[loss=3.478, NarTop10Accuracy=0.6189, over 6465.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6219, over 5823.54 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 10:01:05,538 INFO [trainer.py:765] (3/8) Epoch 15, batch 1000, train_loss[loss=3.41, NarTop10Accuracy=0.6402, over 6179.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6228, over 5926.47 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (3/8) Epoch 15, batch 1100, train_loss[loss=3.507, NarTop10Accuracy=0.611, over 7127.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6196, over 5950.83 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,756 INFO [trainer.py:765] (3/8) Epoch 15, batch 1200, train_loss[loss=3.792, NarTop10Accuracy=0.558, over 7048.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6201, over 5945.76 frames. ], batch size: 30, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (3/8) Epoch 15, batch 1300, train_loss[loss=3.463, NarTop10Accuracy=0.6238, over 5063.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.619, over 6019.14 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,435 INFO [trainer.py:765] (3/8) Epoch 15, batch 1400, train_loss[loss=3.713, NarTop10Accuracy=0.5688, over 6138.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6182, over 6039.80 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,042 INFO [trainer.py:765] (3/8) Epoch 15, batch 1500, train_loss[loss=3.511, NarTop10Accuracy=0.6206, over 6315.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6195, over 5952.06 frames. ], batch size: 52, lr: 5.71e-03 +2024-08-06 10:04:27,107 INFO [trainer.py:765] (3/8) Epoch 15, batch 1600, train_loss[loss=3.589, NarTop10Accuracy=0.5921, over 7073.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6215, over 5944.34 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (3/8) Epoch 15, batch 1700, train_loss[loss=3.449, NarTop10Accuracy=0.6263, over 6082.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6209, over 5922.16 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,728 INFO [trainer.py:765] (3/8) Epoch 15, batch 1800, train_loss[loss=3.717, NarTop10Accuracy=0.5714, over 7232.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6172, over 5980.13 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,266 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (3/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,569 INFO [trainer.py:765] (3/8) Epoch 15, batch 1900, train_loss[loss=3.702, NarTop10Accuracy=0.5717, over 6316.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6167, over 6022.81 frames. ], batch size: 52, lr: 5.68e-03 +2024-08-06 10:06:23,371 INFO [trainer.py:765] (3/8) Epoch 15, batch 2000, train_loss[loss=3.571, NarTop10Accuracy=0.6085, over 5744.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 6002.70 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,758 INFO [trainer.py:765] (3/8) Epoch 15, batch 2100, train_loss[loss=3.141, NarTop10Accuracy=0.6838, over 3911.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6162, over 5983.31 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,171 INFO [trainer.py:765] (3/8) Epoch 15, batch 2200, train_loss[loss=3.492, NarTop10Accuracy=0.6252, over 7704.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6173, over 6011.71 frames. ], batch size: 31, lr: 5.65e-03 +2024-08-06 10:07:39,630 INFO [trainer.py:765] (3/8) Epoch 15, batch 2300, train_loss[loss=3.193, NarTop10Accuracy=0.6826, over 5705.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6157, over 6048.72 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (3/8) Epoch 15, batch 2400, train_loss[loss=3.672, NarTop10Accuracy=0.5876, over 5108.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6152, over 5876.90 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,713 INFO [trainer.py:765] (3/8) Epoch 15, batch 2500, train_loss[loss=3.822, NarTop10Accuracy=0.5547, over 5207.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6181, over 5535.65 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,216 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (3/8) Epoch 16, batch 100, train_loss[loss=3.461, NarTop10Accuracy=0.6235, over 7028.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6316, over 2360.75 frames. ], batch size: 30, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (3/8) Epoch 16, batch 200, train_loss[loss=3.419, NarTop10Accuracy=0.6299, over 6813.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6284, over 3863.49 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (3/8) Epoch 16, batch 300, train_loss[loss=3.389, NarTop10Accuracy=0.6482, over 7337.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6293, over 4658.03 frames. ], batch size: 23, lr: 5.43e-03 +2024-08-06 10:11:29,595 INFO [trainer.py:765] (3/8) Epoch 16, batch 400, train_loss[loss=3.338, NarTop10Accuracy=0.6533, over 4961.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6274, over 5115.86 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,297 INFO [trainer.py:765] (3/8) Epoch 16, batch 500, train_loss[loss=3.601, NarTop10Accuracy=0.602, over 6195.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6284, over 5400.39 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (3/8) Epoch 16, batch 600, train_loss[loss=3.352, NarTop10Accuracy=0.6436, over 5670.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6298, over 5672.32 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,950 INFO [trainer.py:765] (3/8) Epoch 16, batch 700, train_loss[loss=3.472, NarTop10Accuracy=0.6276, over 5034.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6259, over 5728.13 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,285 INFO [trainer.py:765] (3/8) Epoch 16, batch 800, train_loss[loss=3.451, NarTop10Accuracy=0.623, over 5109.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6255, over 5798.19 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,295 INFO [trainer.py:765] (3/8) Epoch 16, batch 900, train_loss[loss=3.456, NarTop10Accuracy=0.6257, over 6799.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6271, over 5825.74 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 10:15:00,059 INFO [trainer.py:765] (3/8) Epoch 16, batch 1000, train_loss[loss=3.57, NarTop10Accuracy=0.6131, over 6318.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6245, over 5909.37 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,509 INFO [trainer.py:765] (3/8) Epoch 16, batch 1100, train_loss[loss=3.471, NarTop10Accuracy=0.6229, over 6927.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6223, over 5960.05 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,384 INFO [trainer.py:765] (3/8) Epoch 16, batch 1200, train_loss[loss=3.492, NarTop10Accuracy=0.6129, over 7200.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6239, over 5948.22 frames. ], batch size: 30, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (3/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,041 INFO [trainer.py:765] (3/8) Epoch 16, batch 1300, train_loss[loss=3.362, NarTop10Accuracy=0.6214, over 5048.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6232, over 6008.00 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,374 INFO [trainer.py:765] (3/8) Epoch 16, batch 1400, train_loss[loss=3.475, NarTop10Accuracy=0.6266, over 6184.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6226, over 6021.88 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,355 INFO [trainer.py:765] (3/8) Epoch 16, batch 1500, train_loss[loss=3.524, NarTop10Accuracy=0.6088, over 6110.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6223, over 5985.14 frames. ], batch size: 48, lr: 5.35e-03 +2024-08-06 10:18:30,468 INFO [trainer.py:765] (3/8) Epoch 16, batch 1600, train_loss[loss=3.652, NarTop10Accuracy=0.5804, over 7348.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6202, over 5975.03 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,271 INFO [trainer.py:765] (3/8) Epoch 16, batch 1700, train_loss[loss=3.785, NarTop10Accuracy=0.5633, over 6259.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6217, over 5962.31 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,977 INFO [trainer.py:765] (3/8) Epoch 16, batch 1800, train_loss[loss=3.767, NarTop10Accuracy=0.5674, over 7216.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.619, over 6022.42 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,771 INFO [trainer.py:765] (3/8) Epoch 16, batch 1900, train_loss[loss=3.687, NarTop10Accuracy=0.579, over 6060.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6195, over 6041.69 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:16,600 INFO [trainer.py:765] (3/8) Epoch 16, batch 2000, train_loss[loss=3.44, NarTop10Accuracy=0.6311, over 5679.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6174, over 6017.94 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:42,158 INFO [trainer.py:765] (3/8) Epoch 16, batch 2100, train_loss[loss=3.366, NarTop10Accuracy=0.6437, over 3920.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6163, over 6000.17 frames. ], batch size: 4, lr: 5.31e-03 +2024-08-06 10:21:07,650 INFO [trainer.py:765] (3/8) Epoch 16, batch 2200, train_loss[loss=3.42, NarTop10Accuracy=0.6293, over 7536.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6172, over 6035.47 frames. ], batch size: 31, lr: 5.30e-03 +2024-08-06 10:21:36,080 INFO [trainer.py:765] (3/8) Epoch 16, batch 2300, train_loss[loss=3.582, NarTop10Accuracy=0.594, over 5899.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6164, over 6059.83 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,906 INFO [trainer.py:765] (3/8) Epoch 16, batch 2400, train_loss[loss=3.18, NarTop10Accuracy=0.6867, over 5076.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6167, over 5862.48 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,289 INFO [trainer.py:765] (3/8) Epoch 16, batch 2500, train_loss[loss=3.145, NarTop10Accuracy=0.6792, over 5072.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6224, over 5524.43 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,707 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:23:45,727 INFO [trainer.py:765] (3/8) Epoch 17, batch 100, train_loss[loss=3.436, NarTop10Accuracy=0.6263, over 7223.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6332, over 2373.52 frames. ], batch size: 30, lr: 5.12e-03 +2024-08-06 10:24:19,033 INFO [trainer.py:765] (3/8) Epoch 17, batch 200, train_loss[loss=3.312, NarTop10Accuracy=0.666, over 6929.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.632, over 3873.00 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,441 INFO [trainer.py:765] (3/8) Epoch 17, batch 300, train_loss[loss=3.601, NarTop10Accuracy=0.5902, over 7236.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6299, over 4674.80 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (3/8) Epoch 17, batch 400, train_loss[loss=3.605, NarTop10Accuracy=0.5897, over 5122.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6305, over 5125.88 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (3/8) Epoch 17, batch 500, train_loss[loss=3.467, NarTop10Accuracy=0.6109, over 6078.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6317, over 5397.74 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,756 INFO [trainer.py:765] (3/8) Epoch 17, batch 600, train_loss[loss=3.698, NarTop10Accuracy=0.577, over 5904.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6316, over 5667.79 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (3/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,071 INFO [trainer.py:765] (3/8) Epoch 17, batch 700, train_loss[loss=3.141, NarTop10Accuracy=0.6872, over 4978.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6271, over 5743.65 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (3/8) Epoch 17, batch 800, train_loss[loss=3.368, NarTop10Accuracy=0.6443, over 5022.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6282, over 5801.04 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,838 INFO [trainer.py:765] (3/8) Epoch 17, batch 900, train_loss[loss=3.373, NarTop10Accuracy=0.6451, over 6253.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6281, over 5838.19 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,683 INFO [trainer.py:765] (3/8) Epoch 17, batch 1000, train_loss[loss=3.45, NarTop10Accuracy=0.6237, over 6281.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6283, over 5930.93 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (3/8) Epoch 17, batch 1100, train_loss[loss=3.296, NarTop10Accuracy=0.66, over 6828.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6248, over 5943.13 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,242 INFO [trainer.py:765] (3/8) Epoch 17, batch 1200, train_loss[loss=3.446, NarTop10Accuracy=0.6326, over 7050.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6258, over 5937.37 frames. ], batch size: 30, lr: 5.05e-03 +2024-08-06 10:30:47,101 INFO [trainer.py:765] (3/8) Epoch 17, batch 1300, train_loss[loss=3.369, NarTop10Accuracy=0.6442, over 5023.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6248, over 6023.60 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,893 INFO [trainer.py:765] (3/8) Epoch 17, batch 1400, train_loss[loss=3.394, NarTop10Accuracy=0.6345, over 6096.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6242, over 6035.39 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (3/8) Epoch 17, batch 1500, train_loss[loss=3.572, NarTop10Accuracy=0.6159, over 5896.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6231, over 5969.55 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,400 INFO [trainer.py:765] (3/8) Epoch 17, batch 1600, train_loss[loss=3.472, NarTop10Accuracy=0.6244, over 7376.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6219, over 5949.88 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,394 INFO [trainer.py:765] (3/8) Epoch 17, batch 1700, train_loss[loss=3.558, NarTop10Accuracy=0.609, over 6271.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6206, over 5937.75 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (3/8) Epoch 17, batch 1800, train_loss[loss=3.639, NarTop10Accuracy=0.5931, over 7087.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6196, over 6003.59 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,597 INFO [trainer.py:765] (3/8) Epoch 17, batch 1900, train_loss[loss=3.956, NarTop10Accuracy=0.5225, over 5892.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6201, over 6036.17 frames. 
], batch size: 49, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (3/8) Epoch 17, batch 2000, train_loss[loss=3.94, NarTop10Accuracy=0.5275, over 6341.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6213, over 6020.53 frames. ], batch size: 50, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (3/8) Epoch 17, batch 2100, train_loss[loss=3.45, NarTop10Accuracy=0.6183, over 3884.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6193, over 6011.87 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (3/8) Epoch 17, batch 2200, train_loss[loss=3.342, NarTop10Accuracy=0.6533, over 7160.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6233, over 6058.76 frames. ], batch size: 30, lr: 4.99e-03 +2024-08-06 10:35:25,733 INFO [trainer.py:765] (3/8) Epoch 17, batch 2300, train_loss[loss=3.336, NarTop10Accuracy=0.6489, over 5762.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6198, over 6069.58 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (3/8) Epoch 17, batch 2400, train_loss[loss=3.342, NarTop10Accuracy=0.6424, over 5196.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6183, over 5889.74 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,105 INFO [trainer.py:765] (3/8) Epoch 17, batch 2500, train_loss[loss=3.542, NarTop10Accuracy=0.6086, over 4250.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6228, over 5549.08 frames. ], batch size: 5, lr: 4.98e-03 +2024-08-06 10:36:35,042 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:37:32,052 INFO [trainer.py:765] (3/8) Epoch 18, batch 100, train_loss[loss=3.416, NarTop10Accuracy=0.629, over 7121.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6381, over 2362.27 frames. ], batch size: 30, lr: 4.83e-03 +2024-08-06 10:37:39,163 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,085 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:37:49,685 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (3/8) Epoch 18, batch 200, train_loss[loss=3.571, NarTop10Accuracy=0.5989, over 6752.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6343, over 3855.66 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,198 INFO [trainer.py:765] (3/8) Epoch 18, batch 300, train_loss[loss=3.518, NarTop10Accuracy=0.6123, over 7156.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6357, over 4679.46 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (3/8) Epoch 18, batch 400, train_loss[loss=3.507, NarTop10Accuracy=0.6125, over 5183.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6327, over 5127.15 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (3/8) Epoch 18, batch 500, train_loss[loss=3.411, NarTop10Accuracy=0.6456, over 6079.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6336, over 5417.76 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (3/8) Epoch 18, batch 600, train_loss[loss=3.5, NarTop10Accuracy=0.6251, over 5739.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.634, over 5685.76 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (3/8) Epoch 18, batch 700, train_loss[loss=3.338, NarTop10Accuracy=0.6481, over 5138.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6312, over 5755.40 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (3/8) Epoch 18, batch 800, train_loss[loss=3.336, NarTop10Accuracy=0.6468, over 5059.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.633, over 5793.01 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (3/8) Epoch 18, batch 900, train_loss[loss=3.6, NarTop10Accuracy=0.5951, over 6205.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6303, over 5814.43 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,702 INFO [trainer.py:765] (3/8) Epoch 18, batch 1000, train_loss[loss=3.146, NarTop10Accuracy=0.6825, over 6156.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6307, over 5908.15 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,182 INFO [trainer.py:765] (3/8) Epoch 18, batch 1100, train_loss[loss=3.889, NarTop10Accuracy=0.5424, over 6769.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6257, over 5942.28 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (3/8) Epoch 18, batch 1200, train_loss[loss=3.469, NarTop10Accuracy=0.6244, over 7487.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.627, over 5934.61 frames. ], batch size: 31, lr: 4.77e-03 +2024-08-06 10:44:35,920 INFO [trainer.py:765] (3/8) Epoch 18, batch 1300, train_loss[loss=3.27, NarTop10Accuracy=0.669, over 5154.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6285, over 6008.21 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,238 INFO [trainer.py:765] (3/8) Epoch 18, batch 1400, train_loss[loss=3.303, NarTop10Accuracy=0.663, over 6101.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6264, over 6039.40 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,975 INFO [trainer.py:765] (3/8) Epoch 18, batch 1500, train_loss[loss=3.782, NarTop10Accuracy=0.5631, over 6210.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6255, over 5973.58 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (3/8) Epoch 18, batch 1600, train_loss[loss=3.289, NarTop10Accuracy=0.6592, over 7198.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6244, over 5946.22 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,858 INFO [trainer.py:765] (3/8) Epoch 18, batch 1700, train_loss[loss=3.894, NarTop10Accuracy=0.5421, over 6268.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6265, over 5939.64 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (3/8) Epoch 18, batch 1800, train_loss[loss=3.361, NarTop10Accuracy=0.6439, over 6957.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6231, over 6002.21 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (3/8) Epoch 18, batch 1900, train_loss[loss=3.74, NarTop10Accuracy=0.5768, over 6018.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6223, over 6028.81 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (3/8) Epoch 18, batch 2000, train_loss[loss=3.534, NarTop10Accuracy=0.6127, over 5847.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6217, over 6012.18 frames. 
], batch size: 49, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (3/8) Epoch 18, batch 2100, train_loss[loss=3.293, NarTop10Accuracy=0.6534, over 3892.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6222, over 5994.24 frames. ], batch size: 4, lr: 4.72e-03 +2024-08-06 10:48:24,748 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (3/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:48:35,534 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,095 INFO [trainer.py:765] (3/8) Epoch 18, batch 2200, train_loss[loss=3.462, NarTop10Accuracy=0.6321, over 7051.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6227, over 6040.18 frames. ], batch size: 30, lr: 4.72e-03 +2024-08-06 10:49:21,521 INFO [trainer.py:765] (3/8) Epoch 18, batch 2300, train_loss[loss=3.541, NarTop10Accuracy=0.6094, over 5722.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6224, over 6075.75 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (3/8) Epoch 18, batch 2400, train_loss[loss=3.45, NarTop10Accuracy=0.6265, over 5084.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6219, over 5883.96 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,708 INFO [trainer.py:765] (3/8) Epoch 18, batch 2500, train_loss[loss=3.36, NarTop10Accuracy=0.6509, over 5034.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6259, over 5542.23 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:31,343 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (3/8) Epoch 19, batch 100, train_loss[loss=3.358, NarTop10Accuracy=0.653, over 7290.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6411, over 2367.09 frames. ], batch size: 30, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (3/8) Epoch 19, batch 200, train_loss[loss=3.638, NarTop10Accuracy=0.5982, over 6774.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6401, over 3865.48 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (3/8) Epoch 19, batch 300, train_loss[loss=3.424, NarTop10Accuracy=0.6245, over 7066.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6393, over 4659.35 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,829 INFO [trainer.py:765] (3/8) Epoch 19, batch 400, train_loss[loss=3.224, NarTop10Accuracy=0.6754, over 5142.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.637, over 5134.68 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (3/8) Epoch 19, batch 500, train_loss[loss=3.423, NarTop10Accuracy=0.6459, over 6047.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6377, over 5407.92 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,601 INFO [trainer.py:765] (3/8) Epoch 19, batch 600, train_loss[loss=3.173, NarTop10Accuracy=0.679, over 5771.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6354, over 5672.30 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (3/8) Epoch 19, batch 700, train_loss[loss=3.082, NarTop10Accuracy=0.6712, over 5146.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6338, over 5739.67 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (3/8) Epoch 19, batch 800, train_loss[loss=3.379, NarTop10Accuracy=0.646, over 4354.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6325, over 5792.71 frames. ], batch size: 5, lr: 4.53e-03 +2024-08-06 10:56:02,238 INFO [trainer.py:765] (3/8) Epoch 19, batch 900, train_loss[loss=3.68, NarTop10Accuracy=0.5804, over 6545.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.633, over 5811.88 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (3/8) Epoch 19, batch 1000, train_loss[loss=3.217, NarTop10Accuracy=0.6655, over 6251.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6307, over 5925.04 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,187 INFO [trainer.py:765] (3/8) Epoch 19, batch 1100, train_loss[loss=3.372, NarTop10Accuracy=0.6517, over 6800.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6269, over 5956.96 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (3/8) Epoch 19, batch 1200, train_loss[loss=3.306, NarTop10Accuracy=0.6637, over 7287.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6277, over 5961.64 frames. ], batch size: 30, lr: 4.51e-03 +2024-08-06 10:58:23,901 INFO [trainer.py:765] (3/8) Epoch 19, batch 1300, train_loss[loss=3.308, NarTop10Accuracy=0.6606, over 4236.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6266, over 6012.61 frames. ], batch size: 5, lr: 4.51e-03 +2024-08-06 10:58:58,029 INFO [trainer.py:765] (3/8) Epoch 19, batch 1400, train_loss[loss=3.431, NarTop10Accuracy=0.6356, over 6133.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6268, over 6022.35 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (3/8) Epoch 19, batch 1500, train_loss[loss=3.827, NarTop10Accuracy=0.5519, over 6484.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6289, over 5956.21 frames. ], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,832 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (3/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (3/8) Epoch 19, batch 1600, train_loss[loss=3.63, NarTop10Accuracy=0.5836, over 7163.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6286, over 5958.84 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (3/8) Epoch 19, batch 1700, train_loss[loss=3.675, NarTop10Accuracy=0.5816, over 6335.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6281, over 5943.37 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,258 INFO [trainer.py:765] (3/8) Epoch 19, batch 1800, train_loss[loss=3.308, NarTop10Accuracy=0.6639, over 7249.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6301, over 6000.77 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (3/8) Epoch 19, batch 1900, train_loss[loss=3.679, NarTop10Accuracy=0.575, over 5964.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6266, over 6041.91 frames. 
], batch size: 49, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (3/8) Epoch 19, batch 2000, train_loss[loss=3.541, NarTop10Accuracy=0.6076, over 6384.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6276, over 6013.57 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (3/8) Epoch 19, batch 2100, train_loss[loss=3.611, NarTop10Accuracy=0.6045, over 3992.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6274, over 5979.81 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,694 INFO [trainer.py:765] (3/8) Epoch 19, batch 2200, train_loss[loss=3.285, NarTop10Accuracy=0.6623, over 7270.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6262, over 6025.34 frames. ], batch size: 30, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (3/8) Epoch 19, batch 2300, train_loss[loss=3.203, NarTop10Accuracy=0.6843, over 5853.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6265, over 6060.49 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,951 INFO [trainer.py:765] (3/8) Epoch 19, batch 2400, train_loss[loss=3.454, NarTop10Accuracy=0.6404, over 5807.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6246, over 5880.47 frames. ], batch size: 8, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (3/8) Epoch 19, batch 2500, train_loss[loss=3.735, NarTop10Accuracy=0.5639, over 5049.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6306, over 5528.67 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:23,800 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:05:26,561 INFO [trainer.py:765] (3/8) Epoch 20, batch 100, train_loss[loss=3.475, NarTop10Accuracy=0.6181, over 7070.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6389, over 2361.87 frames. ], batch size: 30, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (3/8) Epoch 20, batch 200, train_loss[loss=3.154, NarTop10Accuracy=0.6807, over 7135.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6394, over 3875.21 frames. ], batch size: 18, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (3/8) Epoch 20, batch 300, train_loss[loss=3.247, NarTop10Accuracy=0.6733, over 7130.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6397, over 4682.60 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (3/8) Epoch 20, batch 400, train_loss[loss=3.392, NarTop10Accuracy=0.641, over 5232.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5129.03 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (3/8) Epoch 20, batch 500, train_loss[loss=3.367, NarTop10Accuracy=0.6369, over 6212.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6399, over 5412.43 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (3/8) Epoch 20, batch 600, train_loss[loss=3.294, NarTop10Accuracy=0.6633, over 5788.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6403, over 5673.83 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,274 INFO [trainer.py:765] (3/8) Epoch 20, batch 700, train_loss[loss=3.408, NarTop10Accuracy=0.6368, over 5114.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6374, over 5748.56 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (3/8) Epoch 20, batch 800, train_loss[loss=3.294, NarTop10Accuracy=0.6619, over 5165.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 5798.15 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (3/8) Epoch 20, batch 900, train_loss[loss=3.37, NarTop10Accuracy=0.6316, over 6555.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6331, over 5822.44 frames. ], batch size: 14, lr: 4.30e-03 +2024-08-06 11:10:12,199 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (3/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,965 INFO [trainer.py:765] (3/8) Epoch 20, batch 1000, train_loss[loss=3.274, NarTop10Accuracy=0.6722, over 6653.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6343, over 5905.93 frames. ], batch size: 14, lr: 4.29e-03 +2024-08-06 11:11:21,022 INFO [trainer.py:765] (3/8) Epoch 20, batch 1100, train_loss[loss=3.211, NarTop10Accuracy=0.6752, over 6744.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6322, over 5931.64 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,394 INFO [trainer.py:765] (3/8) Epoch 20, batch 1200, train_loss[loss=3.301, NarTop10Accuracy=0.6572, over 6997.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6318, over 5933.84 frames. ], batch size: 30, lr: 4.28e-03 +2024-08-06 11:12:30,752 INFO [trainer.py:765] (3/8) Epoch 20, batch 1300, train_loss[loss=3.583, NarTop10Accuracy=0.5922, over 5060.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6324, over 6017.64 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,292 INFO [trainer.py:765] (3/8) Epoch 20, batch 1400, train_loss[loss=3.639, NarTop10Accuracy=0.5936, over 6194.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6294, over 6037.41 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,990 INFO [trainer.py:765] (3/8) Epoch 20, batch 1500, train_loss[loss=3.446, NarTop10Accuracy=0.6296, over 5902.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6287, over 5978.26 frames. ], batch size: 48, lr: 4.27e-03 +2024-08-06 11:14:07,052 INFO [trainer.py:765] (3/8) Epoch 20, batch 1600, train_loss[loss=3.567, NarTop10Accuracy=0.6071, over 7179.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6281, over 5959.45 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,911 INFO [trainer.py:765] (3/8) Epoch 20, batch 1700, train_loss[loss=3.435, NarTop10Accuracy=0.6266, over 6652.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6304, over 5950.13 frames. ], batch size: 14, lr: 4.26e-03 +2024-08-06 11:15:00,591 INFO [trainer.py:765] (3/8) Epoch 20, batch 1800, train_loss[loss=3.287, NarTop10Accuracy=0.6618, over 7226.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6286, over 6005.99 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,277 INFO [trainer.py:765] (3/8) Epoch 20, batch 1900, train_loss[loss=3.41, NarTop10Accuracy=0.6319, over 6052.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.627, over 6053.34 frames. ], batch size: 49, lr: 4.26e-03 +2024-08-06 11:15:56,438 INFO [trainer.py:765] (3/8) Epoch 20, batch 2000, train_loss[loss=3.494, NarTop10Accuracy=0.618, over 6079.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6272, over 6014.46 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,958 INFO [trainer.py:765] (3/8) Epoch 20, batch 2100, train_loss[loss=3.278, NarTop10Accuracy=0.6569, over 4856.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6276, over 5994.64 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 11:16:47,406 INFO [trainer.py:765] (3/8) Epoch 20, batch 2200, train_loss[loss=3.419, NarTop10Accuracy=0.6392, over 7499.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6275, over 6052.55 frames. ], batch size: 31, lr: 4.24e-03 +2024-08-06 11:17:12,908 INFO [trainer.py:765] (3/8) Epoch 20, batch 2300, train_loss[loss=3.696, NarTop10Accuracy=0.5753, over 5783.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6249, over 6093.30 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,716 INFO [trainer.py:765] (3/8) Epoch 20, batch 2400, train_loss[loss=3.314, NarTop10Accuracy=0.6669, over 5013.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6254, over 5874.55 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,248 INFO [trainer.py:765] (3/8) Epoch 20, batch 2500, train_loss[loss=3.372, NarTop10Accuracy=0.6309, over 5188.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6309, over 5538.22 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,210 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:19:21,459 INFO [trainer.py:765] (3/8) Epoch 21, batch 100, train_loss[loss=3.387, NarTop10Accuracy=0.6434, over 7190.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6389, over 2358.20 frames. ], batch size: 31, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (3/8) Epoch 21, batch 200, train_loss[loss=3.196, NarTop10Accuracy=0.6614, over 6840.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6388, over 3868.34 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (3/8) Epoch 21, batch 300, train_loss[loss=3.658, NarTop10Accuracy=0.586, over 7199.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6416, over 4689.12 frames. ], batch size: 23, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (3/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (3/8) Epoch 21, batch 400, train_loss[loss=3.507, NarTop10Accuracy=0.622, over 5726.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6403, over 5139.45 frames. ], batch size: 8, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (3/8) Epoch 21, batch 500, train_loss[loss=3.255, NarTop10Accuracy=0.6617, over 6073.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6417, over 5416.19 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,238 INFO [trainer.py:765] (3/8) Epoch 21, batch 600, train_loss[loss=3.674, NarTop10Accuracy=0.5941, over 5863.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6392, over 5670.65 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,841 INFO [trainer.py:765] (3/8) Epoch 21, batch 700, train_loss[loss=3.275, NarTop10Accuracy=0.6683, over 4926.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6369, over 5719.69 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,074 INFO [trainer.py:765] (3/8) Epoch 21, batch 800, train_loss[loss=3.458, NarTop10Accuracy=0.6277, over 5234.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6365, over 5781.02 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,020 INFO [trainer.py:765] (3/8) Epoch 21, batch 900, train_loss[loss=3.384, NarTop10Accuracy=0.6401, over 6348.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6354, over 5797.53 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,088 INFO [trainer.py:765] (3/8) Epoch 21, batch 1000, train_loss[loss=3.501, NarTop10Accuracy=0.6216, over 6302.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6341, over 5896.87 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:25:16,427 INFO [trainer.py:765] (3/8) Epoch 21, batch 1100, train_loss[loss=3.551, NarTop10Accuracy=0.612, over 7014.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6317, over 5925.94 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,739 INFO [trainer.py:765] (3/8) Epoch 21, batch 1200, train_loss[loss=3.139, NarTop10Accuracy=0.6873, over 7161.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6335, over 5931.29 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,055 INFO [trainer.py:765] (3/8) Epoch 21, batch 1300, train_loss[loss=3.34, NarTop10Accuracy=0.645, over 5065.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6348, over 6010.29 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,081 INFO [trainer.py:765] (3/8) Epoch 21, batch 1400, train_loss[loss=3.333, NarTop10Accuracy=0.6467, over 6108.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6312, over 6034.82 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,325 INFO [trainer.py:765] (3/8) Epoch 21, batch 1500, train_loss[loss=3.883, NarTop10Accuracy=0.5441, over 5786.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6322, over 5967.70 frames. ], batch size: 49, lr: 4.07e-03 +2024-08-06 11:28:03,314 INFO [trainer.py:765] (3/8) Epoch 21, batch 1600, train_loss[loss=3.321, NarTop10Accuracy=0.6466, over 7188.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6307, over 5950.42 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,104 INFO [trainer.py:765] (3/8) Epoch 21, batch 1700, train_loss[loss=3.58, NarTop10Accuracy=0.6046, over 6154.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6291, over 5940.36 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,640 INFO [trainer.py:765] (3/8) Epoch 21, batch 1800, train_loss[loss=3.626, NarTop10Accuracy=0.5967, over 7240.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6287, over 6007.31 frames. ], batch size: 23, lr: 4.06e-03 +2024-08-06 11:29:23,197 INFO [trainer.py:765] (3/8) Epoch 21, batch 1900, train_loss[loss=3.431, NarTop10Accuracy=0.6355, over 6450.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6282, over 6049.97 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:29:49,027 INFO [trainer.py:765] (3/8) Epoch 21, batch 2000, train_loss[loss=3.532, NarTop10Accuracy=0.6137, over 6243.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6274, over 6021.76 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:30:14,527 INFO [trainer.py:765] (3/8) Epoch 21, batch 2100, train_loss[loss=3.273, NarTop10Accuracy=0.6622, over 4822.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6273, over 6003.36 frames. 
], batch size: 5, lr: 4.04e-03 +2024-08-06 11:30:39,869 INFO [trainer.py:765] (3/8) Epoch 21, batch 2200, train_loss[loss=3.625, NarTop10Accuracy=0.5934, over 7128.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6277, over 6047.31 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,470 INFO [trainer.py:765] (3/8) Epoch 21, batch 2300, train_loss[loss=3.278, NarTop10Accuracy=0.6606, over 5855.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6274, over 6073.02 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,872 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (3/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,753 INFO [trainer.py:765] (3/8) Epoch 21, batch 2400, train_loss[loss=3.118, NarTop10Accuracy=0.6945, over 5234.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6251, over 5888.97 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,057 INFO [trainer.py:765] (3/8) Epoch 21, batch 2500, train_loss[loss=3.107, NarTop10Accuracy=0.6986, over 5076.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6294, over 5521.30 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,533 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (3/8) Epoch 22, batch 100, train_loss[loss=3.733, NarTop10Accuracy=0.5741, over 7593.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6451, over 2388.67 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 11:34:05,036 INFO [trainer.py:765] (3/8) Epoch 22, batch 200, train_loss[loss=3.229, NarTop10Accuracy=0.6795, over 6827.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6466, over 3873.89 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (3/8) Epoch 22, batch 300, train_loss[loss=3.175, NarTop10Accuracy=0.6824, over 7169.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6454, over 4671.26 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (3/8) Epoch 22, batch 400, train_loss[loss=3.379, NarTop10Accuracy=0.6495, over 5670.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6429, over 5127.29 frames. ], batch size: 8, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (3/8) Epoch 22, batch 500, train_loss[loss=3.147, NarTop10Accuracy=0.6822, over 6161.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6426, over 5405.96 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (3/8) Epoch 22, batch 600, train_loss[loss=3.108, NarTop10Accuracy=0.6976, over 5792.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6422, over 5673.07 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (3/8) Epoch 22, batch 700, train_loss[loss=3.412, NarTop10Accuracy=0.6393, over 5170.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6395, over 5738.36 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (3/8) Epoch 22, batch 800, train_loss[loss=3.149, NarTop10Accuracy=0.6801, over 5032.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6393, over 5804.76 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (3/8) Epoch 22, batch 900, train_loss[loss=3.582, NarTop10Accuracy=0.6093, over 6294.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 5825.12 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,330 INFO [trainer.py:765] (3/8) Epoch 22, batch 1000, train_loss[loss=3.119, NarTop10Accuracy=0.6936, over 6722.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6384, over 5918.54 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (3/8) Epoch 22, batch 1100, train_loss[loss=3.543, NarTop10Accuracy=0.607, over 6953.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6366, over 5950.52 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (3/8) Epoch 22, batch 1200, train_loss[loss=3.315, NarTop10Accuracy=0.6638, over 7455.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6354, over 5951.24 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (3/8) Epoch 22, batch 1300, train_loss[loss=3.199, NarTop10Accuracy=0.6753, over 5113.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6333, over 6021.84 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,609 INFO [trainer.py:765] (3/8) Epoch 22, batch 1400, train_loss[loss=3.584, NarTop10Accuracy=0.5975, over 6155.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6336, over 6048.45 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,585 INFO [trainer.py:765] (3/8) Epoch 22, batch 1500, train_loss[loss=3.611, NarTop10Accuracy=0.5983, over 6153.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6331, over 5967.04 frames. ], batch size: 54, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (3/8) Epoch 22, batch 1600, train_loss[loss=3.306, NarTop10Accuracy=0.656, over 7030.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6327, over 5963.03 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,464 INFO [trainer.py:765] (3/8) Epoch 22, batch 1700, train_loss[loss=3.421, NarTop10Accuracy=0.6409, over 6789.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6302, over 5964.75 frames. ], batch size: 14, lr: 3.87e-03 +2024-08-06 11:42:50,724 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (3/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (3/8) Epoch 22, batch 1800, train_loss[loss=3.381, NarTop10Accuracy=0.6423, over 7351.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6319, over 6018.72 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,752 INFO [trainer.py:765] (3/8) Epoch 22, batch 1900, train_loss[loss=3.592, NarTop10Accuracy=0.5999, over 5728.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6317, over 6046.59 frames. ], batch size: 49, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (3/8) Epoch 22, batch 2000, train_loss[loss=3.885, NarTop10Accuracy=0.5467, over 6239.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6321, over 6019.72 frames. 
], batch size: 49, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (3/8) Epoch 22, batch 2100, train_loss[loss=3.284, NarTop10Accuracy=0.6635, over 3925.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6318, over 5990.95 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (3/8) Epoch 22, batch 2200, train_loss[loss=3.573, NarTop10Accuracy=0.5981, over 7494.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 6026.67 frames. ], batch size: 31, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (3/8) Epoch 22, batch 2300, train_loss[loss=3.355, NarTop10Accuracy=0.6549, over 5844.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6296, over 6066.61 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (3/8) Epoch 22, batch 2400, train_loss[loss=3.371, NarTop10Accuracy=0.6454, over 5207.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6272, over 5880.14 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (3/8) Epoch 22, batch 2500, train_loss[loss=3.432, NarTop10Accuracy=0.6332, over 5148.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6308, over 5535.00 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,577 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (3/8) Epoch 23, batch 100, train_loss[loss=3.298, NarTop10Accuracy=0.6579, over 7517.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6444, over 2369.88 frames. ], batch size: 31, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (3/8) Epoch 23, batch 200, train_loss[loss=3.419, NarTop10Accuracy=0.6333, over 6815.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6484, over 3866.10 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,921 INFO [trainer.py:765] (3/8) Epoch 23, batch 300, train_loss[loss=3.214, NarTop10Accuracy=0.6859, over 7171.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6439, over 4658.75 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (3/8) Epoch 23, batch 400, train_loss[loss=3.252, NarTop10Accuracy=0.6657, over 5769.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6443, over 5108.99 frames. ], batch size: 8, lr: 3.74e-03 +2024-08-06 11:49:37,619 INFO [trainer.py:765] (3/8) Epoch 23, batch 500, train_loss[loss=3.666, NarTop10Accuracy=0.583, over 6245.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6424, over 5388.92 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (3/8) Epoch 23, batch 600, train_loss[loss=3.408, NarTop10Accuracy=0.6355, over 5725.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6424, over 5676.13 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,601 INFO [trainer.py:765] (3/8) Epoch 23, batch 700, train_loss[loss=3.33, NarTop10Accuracy=0.6678, over 5089.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6393, over 5737.74 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (3/8) Epoch 23, batch 800, train_loss[loss=3.048, NarTop10Accuracy=0.7153, over 5070.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 5811.98 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,396 INFO [trainer.py:765] (3/8) Epoch 23, batch 900, train_loss[loss=3.508, NarTop10Accuracy=0.6198, over 6179.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6409, over 5830.59 frames. 
], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (3/8) Epoch 23, batch 1000, train_loss[loss=3.279, NarTop10Accuracy=0.6692, over 6521.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6409, over 5926.63 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (3/8) Epoch 23, batch 1100, train_loss[loss=3.515, NarTop10Accuracy=0.6094, over 6836.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6371, over 5967.04 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (3/8) Epoch 23, batch 1200, train_loss[loss=3.42, NarTop10Accuracy=0.6361, over 7445.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6358, over 5959.15 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,825 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (3/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,936 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (3/8) Epoch 23, batch 1300, train_loss[loss=3.12, NarTop10Accuracy=0.692, over 5054.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6355, over 6025.07 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (3/8) Epoch 23, batch 1400, train_loss[loss=3.481, NarTop10Accuracy=0.6178, over 6069.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6359, over 6043.29 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (3/8) Epoch 23, batch 1500, train_loss[loss=3.536, NarTop10Accuracy=0.6128, over 6348.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6334, over 5982.24 frames. ], batch size: 48, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (3/8) Epoch 23, batch 1600, train_loss[loss=3.263, NarTop10Accuracy=0.6528, over 7264.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6346, over 5953.00 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,202 INFO [trainer.py:765] (3/8) Epoch 23, batch 1700, train_loss[loss=3.403, NarTop10Accuracy=0.6353, over 6225.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.632, over 5958.87 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,969 INFO [trainer.py:765] (3/8) Epoch 23, batch 1800, train_loss[loss=3.228, NarTop10Accuracy=0.6639, over 7326.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6324, over 6011.70 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,597 INFO [trainer.py:765] (3/8) Epoch 23, batch 1900, train_loss[loss=3.467, NarTop10Accuracy=0.6242, over 6496.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6313, over 6052.98 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (3/8) Epoch 23, batch 2000, train_loss[loss=3.739, NarTop10Accuracy=0.5699, over 5663.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6315, over 6025.76 frames. ], batch size: 49, lr: 3.69e-03 +2024-08-06 11:58:14,770 INFO [trainer.py:765] (3/8) Epoch 23, batch 2100, train_loss[loss=3.729, NarTop10Accuracy=0.5627, over 4001.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6322, over 6001.97 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (3/8) Epoch 23, batch 2200, train_loss[loss=3.611, NarTop10Accuracy=0.6032, over 7204.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6348, over 6051.58 frames. ], batch size: 30, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (3/8) Epoch 23, batch 2300, train_loss[loss=3.198, NarTop10Accuracy=0.6721, over 5761.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.63, over 6086.85 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (3/8) Epoch 23, batch 2400, train_loss[loss=3.313, NarTop10Accuracy=0.6554, over 5173.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6281, over 5894.50 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,011 INFO [trainer.py:765] (3/8) Epoch 23, batch 2500, train_loss[loss=3.301, NarTop10Accuracy=0.6486, over 5112.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.632, over 5543.34 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:17,993 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:01:22,111 INFO [trainer.py:765] (3/8) Epoch 24, batch 100, train_loss[loss=3.615, NarTop10Accuracy=0.599, over 7754.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6432, over 2387.02 frames. ], batch size: 32, lr: 3.59e-03 +2024-08-06 12:01:51,341 INFO [trainer.py:765] (3/8) Epoch 24, batch 200, train_loss[loss=3.441, NarTop10Accuracy=0.633, over 7015.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6479, over 3884.02 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,512 INFO [trainer.py:765] (3/8) Epoch 24, batch 300, train_loss[loss=3.228, NarTop10Accuracy=0.6761, over 6979.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6442, over 4673.38 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,847 INFO [trainer.py:765] (3/8) Epoch 24, batch 400, train_loss[loss=3.286, NarTop10Accuracy=0.6575, over 5114.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6461, over 5112.48 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,256 INFO [trainer.py:765] (3/8) Epoch 24, batch 500, train_loss[loss=3.193, NarTop10Accuracy=0.6862, over 6039.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6465, over 5391.93 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,173 INFO [trainer.py:765] (3/8) Epoch 24, batch 600, train_loss[loss=3.441, NarTop10Accuracy=0.6208, over 5809.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.646, over 5663.76 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,531 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (3/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:04:23,310 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,732 INFO [trainer.py:765] (3/8) Epoch 24, batch 700, train_loss[loss=3.214, NarTop10Accuracy=0.6805, over 4884.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6438, over 5726.84 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,273 INFO [trainer.py:765] (3/8) Epoch 24, batch 800, train_loss[loss=3.665, NarTop10Accuracy=0.5965, over 4963.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.642, over 5779.05 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,753 INFO [trainer.py:765] (3/8) Epoch 24, batch 900, train_loss[loss=3.663, NarTop10Accuracy=0.5885, over 6266.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5799.82 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,811 INFO [trainer.py:765] (3/8) Epoch 24, batch 1000, train_loss[loss=3.236, NarTop10Accuracy=0.6628, over 6682.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5915.33 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:07:09,039 INFO [trainer.py:765] (3/8) Epoch 24, batch 1100, train_loss[loss=3.353, NarTop10Accuracy=0.6486, over 6862.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6375, over 5950.51 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,134 INFO [trainer.py:765] (3/8) Epoch 24, batch 1200, train_loss[loss=3.38, NarTop10Accuracy=0.6348, over 7289.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6369, over 5963.73 frames. ], batch size: 30, lr: 3.56e-03 +2024-08-06 12:08:20,730 INFO [trainer.py:765] (3/8) Epoch 24, batch 1300, train_loss[loss=3.504, NarTop10Accuracy=0.6273, over 5061.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6368, over 6038.48 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,065 INFO [trainer.py:765] (3/8) Epoch 24, batch 1400, train_loss[loss=3.266, NarTop10Accuracy=0.658, over 6069.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.636, over 6041.84 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,337 INFO [trainer.py:765] (3/8) Epoch 24, batch 1500, train_loss[loss=3.603, NarTop10Accuracy=0.6032, over 6091.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6351, over 5970.16 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,524 INFO [trainer.py:765] (3/8) Epoch 24, batch 1600, train_loss[loss=3.208, NarTop10Accuracy=0.6766, over 7110.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6353, over 5953.51 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,545 INFO [trainer.py:765] (3/8) Epoch 24, batch 1700, train_loss[loss=3.602, NarTop10Accuracy=0.5932, over 6673.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.633, over 5950.76 frames. ], batch size: 14, lr: 3.55e-03 +2024-08-06 12:10:49,272 INFO [trainer.py:765] (3/8) Epoch 24, batch 1800, train_loss[loss=3.257, NarTop10Accuracy=0.6668, over 7390.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6346, over 6020.61 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,846 INFO [trainer.py:765] (3/8) Epoch 24, batch 1900, train_loss[loss=3.477, NarTop10Accuracy=0.6249, over 6158.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6346, over 6057.90 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:11:41,666 INFO [trainer.py:765] (3/8) Epoch 24, batch 2000, train_loss[loss=3.548, NarTop10Accuracy=0.6124, over 6254.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6346, over 6032.22 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:12:07,103 INFO [trainer.py:765] (3/8) Epoch 24, batch 2100, train_loss[loss=2.972, NarTop10Accuracy=0.7322, over 4011.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6363, over 6018.17 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 12:12:33,372 INFO [trainer.py:765] (3/8) Epoch 24, batch 2200, train_loss[loss=3.651, NarTop10Accuracy=0.5953, over 7114.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.635, over 6048.77 frames. 
], batch size: 31, lr: 3.53e-03 +2024-08-06 12:12:58,771 INFO [trainer.py:765] (3/8) Epoch 24, batch 2300, train_loss[loss=3.8, NarTop10Accuracy=0.5554, over 5894.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6303, over 6089.36 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,486 INFO [trainer.py:765] (3/8) Epoch 24, batch 2400, train_loss[loss=3.336, NarTop10Accuracy=0.6529, over 5189.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6294, over 5905.09 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 12:13:47,004 INFO [trainer.py:765] (3/8) Epoch 24, batch 2500, train_loss[loss=3.123, NarTop10Accuracy=0.6947, over 5094.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6359, over 5544.86 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:07,990 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:14:50,196 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (3/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (3/8) Epoch 25, batch 100, train_loss[loss=3.159, NarTop10Accuracy=0.69, over 7319.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6463, over 2370.44 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (3/8) Epoch 25, batch 200, train_loss[loss=3.247, NarTop10Accuracy=0.6692, over 6765.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6474, over 3872.53 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (3/8) Epoch 25, batch 300, train_loss[loss=3.365, NarTop10Accuracy=0.6527, over 7095.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6466, over 4679.02 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (3/8) Epoch 25, batch 400, train_loss[loss=3.379, NarTop10Accuracy=0.6428, over 5142.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6455, over 5124.20 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (3/8) Epoch 25, batch 500, train_loss[loss=3.111, NarTop10Accuracy=0.6924, over 5992.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6492, over 5391.54 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (3/8) Epoch 25, batch 600, train_loss[loss=3.053, NarTop10Accuracy=0.6935, over 5768.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 5676.93 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,598 INFO [trainer.py:765] (3/8) Epoch 25, batch 700, train_loss[loss=3.299, NarTop10Accuracy=0.6557, over 5102.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6455, over 5738.93 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,015 INFO [trainer.py:765] (3/8) Epoch 25, batch 800, train_loss[loss=3.202, NarTop10Accuracy=0.6781, over 4312.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6433, over 5799.44 frames. ], batch size: 5, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (3/8) Epoch 25, batch 900, train_loss[loss=3.226, NarTop10Accuracy=0.6777, over 6193.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6428, over 5829.97 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (3/8) Epoch 25, batch 1000, train_loss[loss=3.34, NarTop10Accuracy=0.6495, over 6335.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 5941.24 frames. ], batch size: 13, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (3/8) Epoch 25, batch 1100, train_loss[loss=3.368, NarTop10Accuracy=0.6474, over 6778.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6378, over 5952.26 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,638 INFO [trainer.py:765] (3/8) Epoch 25, batch 1200, train_loss[loss=3.417, NarTop10Accuracy=0.6206, over 7085.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6406, over 5938.56 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (3/8) Epoch 25, batch 1300, train_loss[loss=3.534, NarTop10Accuracy=0.6093, over 5089.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6406, over 6008.91 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (3/8) Epoch 25, batch 1400, train_loss[loss=3.664, NarTop10Accuracy=0.5724, over 6177.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 6021.16 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (3/8) Epoch 25, batch 1500, train_loss[loss=3.852, NarTop10Accuracy=0.5507, over 6161.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6385, over 5954.16 frames. ], batch size: 49, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (3/8) Epoch 25, batch 1600, train_loss[loss=3.228, NarTop10Accuracy=0.6789, over 7048.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6373, over 5942.29 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (3/8) Epoch 25, batch 1700, train_loss[loss=3.628, NarTop10Accuracy=0.5914, over 6348.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6356, over 5917.56 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (3/8) Epoch 25, batch 1800, train_loss[loss=3.334, NarTop10Accuracy=0.6531, over 7178.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6333, over 5994.94 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (3/8) Epoch 25, batch 1900, train_loss[loss=3.547, NarTop10Accuracy=0.6129, over 5924.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6313, over 6042.54 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (3/8) Epoch 25, batch 2000, train_loss[loss=3.683, NarTop10Accuracy=0.5881, over 6061.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.633, over 6015.21 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (3/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:25:59,343 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (3/8) Epoch 25, batch 2100, train_loss[loss=3.19, NarTop10Accuracy=0.6882, over 4775.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6338, over 6005.36 frames. 
], batch size: 5, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (3/8) Epoch 25, batch 2200, train_loss[loss=3.756, NarTop10Accuracy=0.5664, over 7416.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6358, over 6037.85 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (3/8) Epoch 25, batch 2300, train_loss[loss=3.218, NarTop10Accuracy=0.6723, over 5686.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6335, over 6067.58 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,151 INFO [trainer.py:765] (3/8) Epoch 25, batch 2400, train_loss[loss=3.395, NarTop10Accuracy=0.6299, over 5282.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6325, over 5871.19 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (3/8) Epoch 25, batch 2500, train_loss[loss=3.53, NarTop10Accuracy=0.6099, over 5211.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6368, over 5539.99 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:13,004 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (3/8) Epoch 26, batch 100, train_loss[loss=3.587, NarTop10Accuracy=0.5989, over 7365.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6486, over 2377.65 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (3/8) Epoch 26, batch 200, train_loss[loss=3.292, NarTop10Accuracy=0.6615, over 6875.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6477, over 3869.58 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (3/8) Epoch 26, batch 300, train_loss[loss=3.326, NarTop10Accuracy=0.6486, over 7160.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6466, over 4665.09 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,509 INFO [trainer.py:765] (3/8) Epoch 26, batch 400, train_loss[loss=3.232, NarTop10Accuracy=0.6827, over 5070.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6472, over 5132.03 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (3/8) Epoch 26, batch 500, train_loss[loss=3.271, NarTop10Accuracy=0.6576, over 6193.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.65, over 5386.51 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (3/8) Epoch 26, batch 600, train_loss[loss=3.526, NarTop10Accuracy=0.612, over 5708.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6477, over 5680.91 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (3/8) Epoch 26, batch 700, train_loss[loss=3.301, NarTop10Accuracy=0.6576, over 4910.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6457, over 5759.51 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (3/8) Epoch 26, batch 800, train_loss[loss=3.282, NarTop10Accuracy=0.6626, over 5310.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6458, over 5805.70 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (3/8) Epoch 26, batch 900, train_loss[loss=3.519, NarTop10Accuracy=0.5993, over 6343.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6436, over 5813.56 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (3/8) Epoch 26, batch 1000, train_loss[loss=3.168, NarTop10Accuracy=0.6767, over 6630.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6438, over 5919.63 frames. 
], batch size: 14, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (3/8) Epoch 26, batch 1100, train_loss[loss=3.504, NarTop10Accuracy=0.6105, over 6825.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6417, over 5951.09 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (3/8) Epoch 26, batch 1200, train_loss[loss=3.354, NarTop10Accuracy=0.6351, over 7054.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6415, over 5941.21 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (3/8) Epoch 26, batch 1300, train_loss[loss=3.489, NarTop10Accuracy=0.6128, over 5247.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6403, over 6014.25 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (3/8) Epoch 26, batch 1400, train_loss[loss=3.297, NarTop10Accuracy=0.6664, over 6022.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.639, over 6039.15 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,594 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (3/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (3/8) Epoch 26, batch 1500, train_loss[loss=3.617, NarTop10Accuracy=0.5944, over 6653.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6405, over 5977.38 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,060 INFO [trainer.py:765] (3/8) Epoch 26, batch 1600, train_loss[loss=3.353, NarTop10Accuracy=0.6495, over 7166.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6423, over 5960.75 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,854 INFO [trainer.py:765] (3/8) Epoch 26, batch 1700, train_loss[loss=3.517, NarTop10Accuracy=0.6121, over 6181.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6385, over 5936.58 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (3/8) Epoch 26, batch 1800, train_loss[loss=3.085, NarTop10Accuracy=0.6923, over 6930.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6378, over 5991.86 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (3/8) Epoch 26, batch 1900, train_loss[loss=3.58, NarTop10Accuracy=0.6027, over 6254.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6347, over 6043.52 frames. ], batch size: 49, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (3/8) Epoch 26, batch 2000, train_loss[loss=3.533, NarTop10Accuracy=0.6169, over 6214.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6369, over 6032.33 frames. ], batch size: 49, lr: 3.26e-03 +2024-08-06 12:40:02,149 INFO [trainer.py:765] (3/8) Epoch 26, batch 2100, train_loss[loss=3.413, NarTop10Accuracy=0.6313, over 4803.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6392, over 6014.33 frames. ], batch size: 5, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (3/8) Epoch 26, batch 2200, train_loss[loss=3.419, NarTop10Accuracy=0.634, over 7192.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.638, over 6036.46 frames. 
], batch size: 30, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (3/8) Epoch 26, batch 2300, train_loss[loss=3.318, NarTop10Accuracy=0.651, over 5713.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6371, over 6079.17 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (3/8) Epoch 26, batch 2400, train_loss[loss=3.485, NarTop10Accuracy=0.6308, over 5202.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6361, over 5885.67 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,478 INFO [trainer.py:765] (3/8) Epoch 26, batch 2500, train_loss[loss=3.314, NarTop10Accuracy=0.6565, over 5129.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6408, over 5539.84 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,630 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:43:12,533 INFO [trainer.py:765] (3/8) Epoch 27, batch 100, train_loss[loss=3.678, NarTop10Accuracy=0.5889, over 7229.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6479, over 2377.73 frames. ], batch size: 30, lr: 3.19e-03 +2024-08-06 12:43:43,575 INFO [trainer.py:765] (3/8) Epoch 27, batch 200, train_loss[loss=3.41, NarTop10Accuracy=0.6344, over 6715.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6524, over 3867.96 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (3/8) Epoch 27, batch 300, train_loss[loss=3.262, NarTop10Accuracy=0.668, over 7120.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6508, over 4673.60 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,461 INFO [trainer.py:765] (3/8) Epoch 27, batch 400, train_loss[loss=3.141, NarTop10Accuracy=0.6943, over 5128.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6506, over 5120.60 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,670 INFO [trainer.py:765] (3/8) Epoch 27, batch 500, train_loss[loss=3.263, NarTop10Accuracy=0.6618, over 6153.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.649, over 5408.06 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,260 INFO [trainer.py:765] (3/8) Epoch 27, batch 600, train_loss[loss=3.174, NarTop10Accuracy=0.6912, over 5814.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6479, over 5688.97 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (3/8) Epoch 27, batch 700, train_loss[loss=3.376, NarTop10Accuracy=0.6482, over 5054.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6483, over 5745.60 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,016 INFO [trainer.py:765] (3/8) Epoch 27, batch 800, train_loss[loss=3.293, NarTop10Accuracy=0.6638, over 4868.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6454, over 5797.91 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,741 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (3/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,258 INFO [trainer.py:765] (3/8) Epoch 27, batch 900, train_loss[loss=3.323, NarTop10Accuracy=0.6456, over 6590.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6465, over 5816.59 frames. 
], batch size: 14, lr: 3.17e-03 +2024-08-06 12:48:22,863 INFO [trainer.py:765] (3/8) Epoch 27, batch 1000, train_loss[loss=3.253, NarTop10Accuracy=0.6568, over 6188.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6447, over 5919.69 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,084 INFO [trainer.py:765] (3/8) Epoch 27, batch 1100, train_loss[loss=3.588, NarTop10Accuracy=0.5945, over 7132.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6429, over 5957.50 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (3/8) Epoch 27, batch 1200, train_loss[loss=3.259, NarTop10Accuracy=0.6702, over 7221.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 5952.84 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 12:50:06,242 INFO [trainer.py:765] (3/8) Epoch 27, batch 1300, train_loss[loss=3.151, NarTop10Accuracy=0.6993, over 5071.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6435, over 6025.17 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,950 INFO [trainer.py:765] (3/8) Epoch 27, batch 1400, train_loss[loss=3.26, NarTop10Accuracy=0.6592, over 6170.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 6038.94 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,278 INFO [trainer.py:765] (3/8) Epoch 27, batch 1500, train_loss[loss=3.325, NarTop10Accuracy=0.6502, over 5417.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6406, over 5967.63 frames. ], batch size: 48, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (3/8) Epoch 27, batch 1600, train_loss[loss=3.373, NarTop10Accuracy=0.6438, over 7082.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6398, over 5965.89 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,061 INFO [trainer.py:765] (3/8) Epoch 27, batch 1700, train_loss[loss=3.479, NarTop10Accuracy=0.626, over 6158.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6385, over 5954.93 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,668 INFO [trainer.py:765] (3/8) Epoch 27, batch 1800, train_loss[loss=3.387, NarTop10Accuracy=0.6477, over 7078.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6403, over 6015.61 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,289 INFO [trainer.py:765] (3/8) Epoch 27, batch 1900, train_loss[loss=3.773, NarTop10Accuracy=0.5693, over 6067.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6375, over 6059.25 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:27,999 INFO [trainer.py:765] (3/8) Epoch 27, batch 2000, train_loss[loss=3.436, NarTop10Accuracy=0.6348, over 6133.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.638, over 6024.94 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:53,538 INFO [trainer.py:765] (3/8) Epoch 27, batch 2100, train_loss[loss=3.415, NarTop10Accuracy=0.6264, over 3957.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6395, over 5990.15 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (3/8) Epoch 27, batch 2200, train_loss[loss=3.192, NarTop10Accuracy=0.6814, over 7343.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6401, over 6039.41 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (3/8) Epoch 27, batch 2300, train_loss[loss=3.224, NarTop10Accuracy=0.6836, over 5794.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6372, over 6057.08 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (3/8) Epoch 27, batch 2400, train_loss[loss=3.246, NarTop10Accuracy=0.6727, over 5125.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6334, over 5861.81 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,726 INFO [trainer.py:765] (3/8) Epoch 27, batch 2500, train_loss[loss=3.2, NarTop10Accuracy=0.6811, over 4950.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6391, over 5525.11 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,706 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (3/8) Epoch 28, batch 100, train_loss[loss=3.162, NarTop10Accuracy=0.6898, over 7332.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6532, over 2368.36 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,205 INFO [trainer.py:765] (3/8) Epoch 28, batch 200, train_loss[loss=3.338, NarTop10Accuracy=0.6508, over 6787.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6519, over 3858.36 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (3/8) Epoch 28, batch 300, train_loss[loss=3.422, NarTop10Accuracy=0.6306, over 6823.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6478, over 4676.49 frames. ], batch size: 21, lr: 3.07e-03 +2024-08-06 12:57:56,457 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (3/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 12:58:07,334 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,933 INFO [trainer.py:765] (3/8) Epoch 28, batch 400, train_loss[loss=3.304, NarTop10Accuracy=0.6632, over 5224.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 5138.95 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,438 INFO [trainer.py:765] (3/8) Epoch 28, batch 500, train_loss[loss=3.205, NarTop10Accuracy=0.691, over 6075.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6492, over 5409.40 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (3/8) Epoch 28, batch 600, train_loss[loss=3.349, NarTop10Accuracy=0.6635, over 5749.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6496, over 5680.37 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (3/8) Epoch 28, batch 700, train_loss[loss=3.097, NarTop10Accuracy=0.6852, over 5064.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6491, over 5737.08 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,434 INFO [trainer.py:765] (3/8) Epoch 28, batch 800, train_loss[loss=3.536, NarTop10Accuracy=0.6206, over 5051.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6477, over 5810.54 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (3/8) Epoch 28, batch 900, train_loss[loss=3.11, NarTop10Accuracy=0.6968, over 6649.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.645, over 5838.10 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 13:02:06,495 INFO [trainer.py:765] (3/8) Epoch 28, batch 1000, train_loss[loss=3.837, NarTop10Accuracy=0.5561, over 6264.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6419, over 5933.01 frames. 
], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (3/8) Epoch 28, batch 1100, train_loss[loss=3.362, NarTop10Accuracy=0.6467, over 6895.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6406, over 5976.56 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (3/8) Epoch 28, batch 1200, train_loss[loss=3.39, NarTop10Accuracy=0.6404, over 7319.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6411, over 5964.57 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 13:03:54,154 INFO [trainer.py:765] (3/8) Epoch 28, batch 1300, train_loss[loss=2.998, NarTop10Accuracy=0.7226, over 5203.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6435, over 6012.60 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (3/8) Epoch 28, batch 1400, train_loss[loss=3.338, NarTop10Accuracy=0.6405, over 6027.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6412, over 6028.46 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,349 INFO [trainer.py:765] (3/8) Epoch 28, batch 1500, train_loss[loss=3.379, NarTop10Accuracy=0.6522, over 5550.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6412, over 5964.44 frames. ], batch size: 49, lr: 3.04e-03 +2024-08-06 13:05:30,371 INFO [trainer.py:765] (3/8) Epoch 28, batch 1600, train_loss[loss=3.695, NarTop10Accuracy=0.5765, over 7238.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6405, over 5961.96 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,131 INFO [trainer.py:765] (3/8) Epoch 28, batch 1700, train_loss[loss=3.562, NarTop10Accuracy=0.6036, over 6267.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6398, over 5952.15 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (3/8) Epoch 28, batch 1800, train_loss[loss=3.655, NarTop10Accuracy=0.5882, over 7139.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6401, over 6021.81 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (3/8) Epoch 28, batch 1900, train_loss[loss=3.518, NarTop10Accuracy=0.6135, over 6625.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 6051.63 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (3/8) Epoch 28, batch 2000, train_loss[loss=3.556, NarTop10Accuracy=0.6072, over 6712.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.642, over 6030.91 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 13:07:41,546 INFO [trainer.py:765] (3/8) Epoch 28, batch 2100, train_loss[loss=3.704, NarTop10Accuracy=0.584, over 4899.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6399, over 6014.76 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 13:08:06,932 INFO [trainer.py:765] (3/8) Epoch 28, batch 2200, train_loss[loss=3.672, NarTop10Accuracy=0.5893, over 7183.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6385, over 6051.45 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (3/8) Epoch 28, batch 2300, train_loss[loss=3.099, NarTop10Accuracy=0.6875, over 5907.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 6079.33 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,135 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (3/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 13:08:43,891 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,390 INFO [trainer.py:765] (3/8) Epoch 28, batch 2400, train_loss[loss=3.652, NarTop10Accuracy=0.5887, over 5144.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6335, over 5883.52 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (3/8) Epoch 28, batch 2500, train_loss[loss=3.251, NarTop10Accuracy=0.6558, over 5126.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6416, over 5526.72 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:51,780 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:10:48,193 INFO [trainer.py:765] (3/8) Epoch 29, batch 100, train_loss[loss=3.581, NarTop10Accuracy=0.5952, over 7260.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6532, over 2373.09 frames. ], batch size: 30, lr: 2.96e-03 +2024-08-06 13:11:20,841 INFO [trainer.py:765] (3/8) Epoch 29, batch 200, train_loss[loss=3.42, NarTop10Accuracy=0.6388, over 6962.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6544, over 3875.13 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,950 INFO [trainer.py:765] (3/8) Epoch 29, batch 300, train_loss[loss=3.278, NarTop10Accuracy=0.6667, over 6950.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6509, over 4679.22 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (3/8) Epoch 29, batch 400, train_loss[loss=3.486, NarTop10Accuracy=0.6227, over 5198.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6512, over 5139.49 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,921 INFO [trainer.py:765] (3/8) Epoch 29, batch 500, train_loss[loss=3.41, NarTop10Accuracy=0.6436, over 5999.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6513, over 5412.98 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,547 INFO [trainer.py:765] (3/8) Epoch 29, batch 600, train_loss[loss=3.621, NarTop10Accuracy=0.6019, over 5790.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6492, over 5678.50 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,937 INFO [trainer.py:765] (3/8) Epoch 29, batch 700, train_loss[loss=3.701, NarTop10Accuracy=0.5686, over 5124.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6478, over 5756.32 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,677 INFO [trainer.py:765] (3/8) Epoch 29, batch 800, train_loss[loss=3.514, NarTop10Accuracy=0.6022, over 5040.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6416, over 5807.03 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,114 INFO [trainer.py:765] (3/8) Epoch 29, batch 900, train_loss[loss=3.356, NarTop10Accuracy=0.6496, over 6204.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6427, over 5822.29 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (3/8) Epoch 29, batch 1000, train_loss[loss=3.536, NarTop10Accuracy=0.6167, over 6231.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6444, over 5913.82 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,713 INFO [trainer.py:765] (3/8) Epoch 29, batch 1100, train_loss[loss=3.434, NarTop10Accuracy=0.6304, over 6742.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 5956.23 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,933 INFO [trainer.py:765] (3/8) Epoch 29, batch 1200, train_loss[loss=3.239, NarTop10Accuracy=0.6611, over 7254.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6427, over 5964.01 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 13:17:43,957 INFO [trainer.py:765] (3/8) Epoch 29, batch 1300, train_loss[loss=3.185, NarTop10Accuracy=0.673, over 5022.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.642, over 6029.60 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,925 INFO [trainer.py:765] (3/8) Epoch 29, batch 1400, train_loss[loss=3.525, NarTop10Accuracy=0.6186, over 6140.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6403, over 6059.09 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (3/8) Epoch 29, batch 1500, train_loss[loss=3.799, NarTop10Accuracy=0.562, over 5924.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6428, over 5988.80 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:19:16,409 INFO [trainer.py:765] (3/8) Epoch 29, batch 1600, train_loss[loss=3.087, NarTop10Accuracy=0.6974, over 7122.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5960.29 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,242 INFO [trainer.py:765] (3/8) Epoch 29, batch 1700, train_loss[loss=3.327, NarTop10Accuracy=0.6567, over 6283.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5939.55 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,091 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (3/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,108 INFO [trainer.py:765] (3/8) Epoch 29, batch 1800, train_loss[loss=3.345, NarTop10Accuracy=0.6535, over 7141.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 6006.88 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,845 INFO [trainer.py:765] (3/8) Epoch 29, batch 1900, train_loss[loss=3.42, NarTop10Accuracy=0.6342, over 6117.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6371, over 6053.74 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:21:12,479 INFO [trainer.py:765] (3/8) Epoch 29, batch 2000, train_loss[loss=3.702, NarTop10Accuracy=0.583, over 6039.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6402, over 6027.69 frames. ], batch size: 48, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (3/8) Epoch 29, batch 2100, train_loss[loss=2.949, NarTop10Accuracy=0.7141, over 3891.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.639, over 5998.26 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,361 INFO [trainer.py:765] (3/8) Epoch 29, batch 2200, train_loss[loss=3.26, NarTop10Accuracy=0.6714, over 7305.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6398, over 6029.17 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 13:22:28,832 INFO [trainer.py:765] (3/8) Epoch 29, batch 2300, train_loss[loss=3.333, NarTop10Accuracy=0.6358, over 5800.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.638, over 6077.45 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,621 INFO [trainer.py:765] (3/8) Epoch 29, batch 2400, train_loss[loss=3.393, NarTop10Accuracy=0.6435, over 5735.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6382, over 5875.49 frames. ], batch size: 8, lr: 2.92e-03 +2024-08-06 13:23:16,979 INFO [trainer.py:765] (3/8) Epoch 29, batch 2500, train_loss[loss=3.326, NarTop10Accuracy=0.6664, over 4947.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6423, over 5525.51 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,349 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (3/8) Epoch 30, batch 100, train_loss[loss=3.342, NarTop10Accuracy=0.6551, over 6927.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6608, over 2373.36 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (3/8) Epoch 30, batch 200, train_loss[loss=3.402, NarTop10Accuracy=0.6476, over 6821.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6586, over 3866.52 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (3/8) Epoch 30, batch 300, train_loss[loss=3.121, NarTop10Accuracy=0.6967, over 7075.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6555, over 4670.26 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,539 INFO [trainer.py:765] (3/8) Epoch 30, batch 400, train_loss[loss=3.18, NarTop10Accuracy=0.6748, over 5102.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6522, over 5121.39 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (3/8) Epoch 30, batch 500, train_loss[loss=3.301, NarTop10Accuracy=0.6617, over 6120.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6556, over 5400.13 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (3/8) Epoch 30, batch 600, train_loss[loss=3.338, NarTop10Accuracy=0.649, over 5740.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6542, over 5667.78 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (3/8) Epoch 30, batch 700, train_loss[loss=3.301, NarTop10Accuracy=0.6685, over 5281.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6524, over 5741.80 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (3/8) Epoch 30, batch 800, train_loss[loss=3.545, NarTop10Accuracy=0.5993, over 5101.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6495, over 5807.57 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,424 INFO [trainer.py:765] (3/8) Epoch 30, batch 900, train_loss[loss=3.366, NarTop10Accuracy=0.6416, over 6713.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6472, over 5845.60 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 13:29:45,913 INFO [trainer.py:765] (3/8) Epoch 30, batch 1000, train_loss[loss=3.215, NarTop10Accuracy=0.6743, over 6150.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6452, over 5931.71 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,172 INFO [trainer.py:765] (3/8) Epoch 30, batch 1100, train_loss[loss=3.473, NarTop10Accuracy=0.6216, over 7055.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6443, over 5958.67 frames. ], batch size: 18, lr: 2.84e-03 +2024-08-06 13:30:38,001 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (3/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (3/8) Epoch 30, batch 1200, train_loss[loss=3.345, NarTop10Accuracy=0.6581, over 7211.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6431, over 5952.09 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (3/8) Epoch 30, batch 1300, train_loss[loss=3.249, NarTop10Accuracy=0.6749, over 5056.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6438, over 6017.04 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (3/8) Epoch 30, batch 1400, train_loss[loss=3.522, NarTop10Accuracy=0.6017, over 6150.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6431, over 6026.26 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (3/8) Epoch 30, batch 1500, train_loss[loss=3.532, NarTop10Accuracy=0.6138, over 5829.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6423, over 5972.24 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (3/8) Epoch 30, batch 1600, train_loss[loss=3.758, NarTop10Accuracy=0.5673, over 6942.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6401, over 5959.04 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (3/8) Epoch 30, batch 1700, train_loss[loss=3.654, NarTop10Accuracy=0.5884, over 6240.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6407, over 5941.81 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (3/8) Epoch 30, batch 1800, train_loss[loss=3.759, NarTop10Accuracy=0.5697, over 7209.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6412, over 6003.95 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (3/8) Epoch 30, batch 1900, train_loss[loss=3.669, NarTop10Accuracy=0.5844, over 5970.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6382, over 6055.99 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (3/8) Epoch 30, batch 2000, train_loss[loss=3.574, NarTop10Accuracy=0.6123, over 6267.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6424, over 6026.26 frames. ], batch size: 51, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (3/8) Epoch 30, batch 2100, train_loss[loss=3.434, NarTop10Accuracy=0.6272, over 4773.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6398, over 6018.07 frames. ], batch size: 5, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (3/8) Epoch 30, batch 2200, train_loss[loss=3.382, NarTop10Accuracy=0.6445, over 7283.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.64, over 6044.85 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (3/8) Epoch 30, batch 2300, train_loss[loss=3.383, NarTop10Accuracy=0.63, over 5740.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6387, over 6057.18 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,825 INFO [trainer.py:765] (3/8) Epoch 30, batch 2400, train_loss[loss=3.352, NarTop10Accuracy=0.6601, over 5206.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6393, over 5873.48 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (3/8) Epoch 30, batch 2500, train_loss[loss=3.053, NarTop10Accuracy=0.6996, over 5009.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6446, over 5541.05 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:35,806 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (3/8) Epoch 31, batch 100, train_loss[loss=3.258, NarTop10Accuracy=0.671, over 7100.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6571, over 2361.13 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,652 INFO [trainer.py:765] (3/8) Epoch 31, batch 200, train_loss[loss=3.282, NarTop10Accuracy=0.6606, over 6938.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6552, over 3856.32 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,677 INFO [trainer.py:765] (3/8) Epoch 31, batch 300, train_loss[loss=3.344, NarTop10Accuracy=0.6415, over 7115.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6555, over 4659.66 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (3/8) Epoch 31, batch 400, train_loss[loss=3.621, NarTop10Accuracy=0.5959, over 4999.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6522, over 5118.00 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (3/8) Epoch 31, batch 500, train_loss[loss=3.301, NarTop10Accuracy=0.6621, over 6000.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6527, over 5403.74 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,299 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (3/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,863 INFO [trainer.py:765] (3/8) Epoch 31, batch 600, train_loss[loss=3.329, NarTop10Accuracy=0.6499, over 5853.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6513, over 5673.96 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,259 INFO [trainer.py:765] (3/8) Epoch 31, batch 700, train_loss[loss=3.065, NarTop10Accuracy=0.7062, over 5132.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6512, over 5748.16 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (3/8) Epoch 31, batch 800, train_loss[loss=3.16, NarTop10Accuracy=0.6864, over 5224.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6494, over 5806.89 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (3/8) Epoch 31, batch 900, train_loss[loss=3.368, NarTop10Accuracy=0.6537, over 6261.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6515, over 5812.29 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (3/8) Epoch 31, batch 1000, train_loss[loss=3.429, NarTop10Accuracy=0.6488, over 6245.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6491, over 5920.32 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,512 INFO [trainer.py:765] (3/8) Epoch 31, batch 1100, train_loss[loss=3.355, NarTop10Accuracy=0.6388, over 6811.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6491, over 5951.58 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,785 INFO [trainer.py:765] (3/8) Epoch 31, batch 1200, train_loss[loss=3.326, NarTop10Accuracy=0.6512, over 7267.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6472, over 5954.60 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (3/8) Epoch 31, batch 1300, train_loss[loss=3.23, NarTop10Accuracy=0.6832, over 5144.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.648, over 6014.51 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,740 INFO [trainer.py:765] (3/8) Epoch 31, batch 1400, train_loss[loss=3.001, NarTop10Accuracy=0.7108, over 6109.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6469, over 6021.84 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (3/8) Epoch 31, batch 1500, train_loss[loss=3.715, NarTop10Accuracy=0.5741, over 5839.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6455, over 5960.13 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:47:04,657 INFO [trainer.py:765] (3/8) Epoch 31, batch 1600, train_loss[loss=3.244, NarTop10Accuracy=0.6772, over 6969.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6443, over 5947.69 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,423 INFO [trainer.py:765] (3/8) Epoch 31, batch 1700, train_loss[loss=3.593, NarTop10Accuracy=0.5988, over 6321.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6437, over 5931.11 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (3/8) Epoch 31, batch 1800, train_loss[loss=3.488, NarTop10Accuracy=0.6137, over 7095.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6434, over 5986.02 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,576 INFO [trainer.py:765] (3/8) Epoch 31, batch 1900, train_loss[loss=3.451, NarTop10Accuracy=0.6315, over 6079.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6423, over 6027.18 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 13:48:50,257 INFO [trainer.py:765] (3/8) Epoch 31, batch 2000, train_loss[loss=3.576, NarTop10Accuracy=0.6013, over 5440.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6433, over 5988.51 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,764 INFO [trainer.py:765] (3/8) Epoch 31, batch 2100, train_loss[loss=3.488, NarTop10Accuracy=0.6243, over 4756.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 5978.69 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (3/8) Epoch 31, batch 2200, train_loss[loss=3.496, NarTop10Accuracy=0.6235, over 7260.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 6033.65 frames. ], batch size: 30, lr: 2.73e-03 +2024-08-06 13:50:06,707 INFO [trainer.py:765] (3/8) Epoch 31, batch 2300, train_loss[loss=3.431, NarTop10Accuracy=0.627, over 5781.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6393, over 6050.73 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,392 INFO [trainer.py:765] (3/8) Epoch 31, batch 2400, train_loss[loss=3.353, NarTop10Accuracy=0.6477, over 5137.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6363, over 5872.08 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,891 INFO [trainer.py:765] (3/8) Epoch 31, batch 2500, train_loss[loss=3.421, NarTop10Accuracy=0.6213, over 5155.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6427, over 5526.08 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,994 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 13:51:19,069 INFO [trainer.py:811] (3/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,230 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 13:52:19,909 INFO [trainer.py:765] (3/8) Epoch 32, batch 100, train_loss[loss=3.238, NarTop10Accuracy=0.6677, over 7126.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6569, over 2376.57 frames. ], batch size: 30, lr: 2.68e-03 +2024-08-06 13:52:52,537 INFO [trainer.py:765] (3/8) Epoch 32, batch 200, train_loss[loss=3.593, NarTop10Accuracy=0.5934, over 6893.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6545, over 3884.39 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,092 INFO [trainer.py:765] (3/8) Epoch 32, batch 300, train_loss[loss=3.165, NarTop10Accuracy=0.6877, over 7128.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6542, over 4677.08 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,886 INFO [trainer.py:765] (3/8) Epoch 32, batch 400, train_loss[loss=3.642, NarTop10Accuracy=0.5923, over 5124.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6527, over 5137.16 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,821 INFO [trainer.py:765] (3/8) Epoch 32, batch 500, train_loss[loss=3.09, NarTop10Accuracy=0.72, over 6109.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6553, over 5403.21 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,771 INFO [trainer.py:765] (3/8) Epoch 32, batch 600, train_loss[loss=3.433, NarTop10Accuracy=0.6267, over 5608.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6568, over 5655.78 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,510 INFO [trainer.py:765] (3/8) Epoch 32, batch 700, train_loss[loss=3.09, NarTop10Accuracy=0.7024, over 5211.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6533, over 5727.65 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,172 INFO [trainer.py:765] (3/8) Epoch 32, batch 800, train_loss[loss=3.139, NarTop10Accuracy=0.6839, over 5159.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6524, over 5773.96 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,165 INFO [trainer.py:765] (3/8) Epoch 32, batch 900, train_loss[loss=3.682, NarTop10Accuracy=0.5758, over 6208.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6527, over 5795.42 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,520 INFO [trainer.py:765] (3/8) Epoch 32, batch 1000, train_loss[loss=3.448, NarTop10Accuracy=0.6277, over 6160.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 5913.90 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,451 INFO [trainer.py:765] (3/8) Epoch 32, batch 1100, train_loss[loss=3.14, NarTop10Accuracy=0.6812, over 6931.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6477, over 5957.71 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,540 INFO [trainer.py:765] (3/8) Epoch 32, batch 1200, train_loss[loss=3.136, NarTop10Accuracy=0.6921, over 6986.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6476, over 5952.58 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,259 INFO [trainer.py:765] (3/8) Epoch 32, batch 1300, train_loss[loss=3.216, NarTop10Accuracy=0.6769, over 4940.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6469, over 6015.69 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,265 INFO [trainer.py:765] (3/8) Epoch 32, batch 1400, train_loss[loss=3.316, NarTop10Accuracy=0.6568, over 6036.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6468, over 6030.75 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,975 INFO [trainer.py:765] (3/8) Epoch 32, batch 1500, train_loss[loss=3.774, NarTop10Accuracy=0.5653, over 5796.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6468, over 5953.49 frames. ], batch size: 48, lr: 2.66e-03 +2024-08-06 14:00:40,823 INFO [trainer.py:765] (3/8) Epoch 32, batch 1600, train_loss[loss=3.305, NarTop10Accuracy=0.6598, over 7113.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.647, over 5955.87 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,533 INFO [trainer.py:765] (3/8) Epoch 32, batch 1700, train_loss[loss=3.199, NarTop10Accuracy=0.6784, over 6225.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6445, over 5924.68 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 14:01:34,088 INFO [trainer.py:765] (3/8) Epoch 32, batch 1800, train_loss[loss=3.206, NarTop10Accuracy=0.6834, over 7206.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6442, over 5980.75 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,635 INFO [trainer.py:765] (3/8) Epoch 32, batch 1900, train_loss[loss=3.489, NarTop10Accuracy=0.6212, over 5796.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 6025.88 frames. ], batch size: 48, lr: 2.65e-03 +2024-08-06 14:02:20,590 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (3/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,382 INFO [trainer.py:765] (3/8) Epoch 32, batch 2000, train_loss[loss=3.577, NarTop10Accuracy=0.5963, over 6257.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6453, over 6002.69 frames. ], batch size: 48, lr: 2.65e-03 +2024-08-06 14:03:01,697 INFO [trainer.py:765] (3/8) Epoch 32, batch 2100, train_loss[loss=3.353, NarTop10Accuracy=0.6382, over 3982.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6442, over 5992.51 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 14:03:27,176 INFO [trainer.py:765] (3/8) Epoch 32, batch 2200, train_loss[loss=3.598, NarTop10Accuracy=0.593, over 7144.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6434, over 6032.44 frames. ], batch size: 31, lr: 2.64e-03 +2024-08-06 14:03:52,585 INFO [trainer.py:765] (3/8) Epoch 32, batch 2300, train_loss[loss=3.587, NarTop10Accuracy=0.5882, over 5801.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6401, over 6066.89 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (3/8) Epoch 32, batch 2400, train_loss[loss=3.257, NarTop10Accuracy=0.658, over 5093.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6389, over 5878.25 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (3/8) Epoch 32, batch 2500, train_loss[loss=2.957, NarTop10Accuracy=0.73, over 5028.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6463, over 5530.87 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:01,810 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (3/8) Epoch 33, batch 100, train_loss[loss=3.575, NarTop10Accuracy=0.6065, over 7313.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6558, over 2374.39 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (3/8) Epoch 33, batch 200, train_loss[loss=3.305, NarTop10Accuracy=0.6496, over 6886.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6574, over 3881.29 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,146 INFO [trainer.py:765] (3/8) Epoch 33, batch 300, train_loss[loss=3.415, NarTop10Accuracy=0.6295, over 7334.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6572, over 4674.42 frames. ], batch size: 23, lr: 2.59e-03 +2024-08-06 14:07:48,256 INFO [trainer.py:765] (3/8) Epoch 33, batch 400, train_loss[loss=3.534, NarTop10Accuracy=0.6128, over 5161.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.656, over 5134.44 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (3/8) Epoch 33, batch 500, train_loss[loss=3.348, NarTop10Accuracy=0.6505, over 6027.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6536, over 5402.64 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (3/8) Epoch 33, batch 600, train_loss[loss=3.428, NarTop10Accuracy=0.6332, over 5774.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6524, over 5664.36 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,926 INFO [trainer.py:765] (3/8) Epoch 33, batch 700, train_loss[loss=3.59, NarTop10Accuracy=0.5973, over 5244.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6505, over 5754.67 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,597 INFO [trainer.py:765] (3/8) Epoch 33, batch 800, train_loss[loss=3.157, NarTop10Accuracy=0.6886, over 5172.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 5812.51 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,387 INFO [trainer.py:765] (3/8) Epoch 33, batch 900, train_loss[loss=3.29, NarTop10Accuracy=0.6696, over 6322.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6483, over 5837.96 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,069 INFO [trainer.py:765] (3/8) Epoch 33, batch 1000, train_loss[loss=3.286, NarTop10Accuracy=0.6552, over 6206.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5953.04 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,302 INFO [trainer.py:765] (3/8) Epoch 33, batch 1100, train_loss[loss=3.557, NarTop10Accuracy=0.6091, over 6781.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6481, over 5986.45 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (3/8) Epoch 33, batch 1200, train_loss[loss=3.465, NarTop10Accuracy=0.6269, over 7273.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6463, over 5945.49 frames. ], batch size: 30, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (3/8) Epoch 33, batch 1300, train_loss[loss=3.634, NarTop10Accuracy=0.5893, over 4283.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6478, over 6024.56 frames. 
], batch size: 5, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (3/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,802 INFO [trainer.py:765] (3/8) Epoch 33, batch 1400, train_loss[loss=3.233, NarTop10Accuracy=0.671, over 6270.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.65, over 6054.54 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,244 INFO [trainer.py:765] (3/8) Epoch 33, batch 1500, train_loss[loss=3.591, NarTop10Accuracy=0.6062, over 6327.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6473, over 5977.63 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (3/8) Epoch 33, batch 1600, train_loss[loss=3.313, NarTop10Accuracy=0.6575, over 7256.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6448, over 5960.95 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,857 INFO [trainer.py:765] (3/8) Epoch 33, batch 1700, train_loss[loss=3.653, NarTop10Accuracy=0.5891, over 6328.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6445, over 5931.54 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,590 INFO [trainer.py:765] (3/8) Epoch 33, batch 1800, train_loss[loss=3.372, NarTop10Accuracy=0.6488, over 7110.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6484, over 6003.86 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,213 INFO [trainer.py:765] (3/8) Epoch 33, batch 1900, train_loss[loss=3.37, NarTop10Accuracy=0.6544, over 6499.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6455, over 6038.21 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:24,894 INFO [trainer.py:765] (3/8) Epoch 33, batch 2000, train_loss[loss=3.471, NarTop10Accuracy=0.6291, over 5801.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6467, over 5998.19 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:16:50,350 INFO [trainer.py:765] (3/8) Epoch 33, batch 2100, train_loss[loss=3.313, NarTop10Accuracy=0.6642, over 3955.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6444, over 5975.85 frames. ], batch size: 4, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (3/8) Epoch 33, batch 2200, train_loss[loss=3.665, NarTop10Accuracy=0.5858, over 7140.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6445, over 6029.22 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,308 INFO [trainer.py:765] (3/8) Epoch 33, batch 2300, train_loss[loss=3.237, NarTop10Accuracy=0.6838, over 5697.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.641, over 6042.46 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (3/8) Epoch 33, batch 2400, train_loss[loss=3.535, NarTop10Accuracy=0.6104, over 5053.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 5864.14 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,707 INFO [trainer.py:765] (3/8) Epoch 33, batch 2500, train_loss[loss=3.464, NarTop10Accuracy=0.6254, over 5271.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6457, over 5541.14 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,681 INFO [trainer.py:650] (3/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (3/8) Epoch 34, batch 100, train_loss[loss=3.324, NarTop10Accuracy=0.6563, over 6999.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6592, over 2371.18 frames. ], batch size: 30, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (3/8) Epoch 34, batch 200, train_loss[loss=3.24, NarTop10Accuracy=0.6709, over 6940.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6615, over 3879.64 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,842 INFO [trainer.py:765] (3/8) Epoch 34, batch 300, train_loss[loss=3.327, NarTop10Accuracy=0.6498, over 7149.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.658, over 4710.02 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,449 INFO [trainer.py:765] (3/8) Epoch 34, batch 400, train_loss[loss=3.172, NarTop10Accuracy=0.6931, over 5772.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6589, over 5138.81 frames. ], batch size: 8, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (3/8) Epoch 34, batch 500, train_loss[loss=3.105, NarTop10Accuracy=0.6879, over 6232.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6563, over 5416.09 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,826 INFO [trainer.py:765] (3/8) Epoch 34, batch 600, train_loss[loss=3.403, NarTop10Accuracy=0.6399, over 5898.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6562, over 5688.02 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (3/8) Epoch 34, batch 700, train_loss[loss=3.185, NarTop10Accuracy=0.6814, over 4894.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6538, over 5754.99 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (3/8) Epoch 34, batch 800, train_loss[loss=3.535, NarTop10Accuracy=0.6275, over 5021.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6532, over 5822.31 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,719 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (3/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (3/8) Epoch 34, batch 900, train_loss[loss=3.372, NarTop10Accuracy=0.6494, over 6655.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6514, over 5837.17 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (3/8) Epoch 34, batch 1000, train_loss[loss=3.177, NarTop10Accuracy=0.6897, over 6154.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6522, over 5926.43 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 14:25:37,997 INFO [trainer.py:765] (3/8) Epoch 34, batch 1100, train_loss[loss=3.43, NarTop10Accuracy=0.6335, over 6954.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6475, over 5937.51 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,975 INFO [trainer.py:765] (3/8) Epoch 34, batch 1200, train_loss[loss=3.383, NarTop10Accuracy=0.6455, over 7495.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6481, over 5939.20 frames. 
], batch size: 31, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (3/8) Epoch 34, batch 1300, train_loss[loss=3.882, NarTop10Accuracy=0.5556, over 4840.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6488, over 6000.98 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (3/8) Epoch 34, batch 1400, train_loss[loss=3.229, NarTop10Accuracy=0.6653, over 6193.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6486, over 6018.31 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (3/8) Epoch 34, batch 1500, train_loss[loss=3.726, NarTop10Accuracy=0.5712, over 6290.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6508, over 5967.87 frames. ], batch size: 49, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (3/8) Epoch 34, batch 1600, train_loss[loss=3.273, NarTop10Accuracy=0.6655, over 7123.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6478, over 5949.70 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,383 INFO [trainer.py:765] (3/8) Epoch 34, batch 1700, train_loss[loss=3.212, NarTop10Accuracy=0.6662, over 6341.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6462, over 5934.17 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,010 INFO [trainer.py:765] (3/8) Epoch 34, batch 1800, train_loss[loss=3.617, NarTop10Accuracy=0.5967, over 7043.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6484, over 5994.20 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (3/8) Epoch 34, batch 1900, train_loss[loss=3.54, NarTop10Accuracy=0.6121, over 6204.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6442, over 6034.66 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 14:30:09,516 INFO [trainer.py:765] (3/8) Epoch 34, batch 2000, train_loss[loss=3.615, NarTop10Accuracy=0.6044, over 6537.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6457, over 6024.50 frames. ], batch size: 49, lr: 2.49e-03 +2024-08-06 14:30:35,016 INFO [trainer.py:765] (3/8) Epoch 34, batch 2100, train_loss[loss=2.999, NarTop10Accuracy=0.6939, over 3840.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6459, over 5999.42 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (3/8) Epoch 34, batch 2200, train_loss[loss=3.507, NarTop10Accuracy=0.6273, over 7276.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.645, over 6039.79 frames. ], batch size: 30, lr: 2.49e-03 +2024-08-06 14:31:25,978 INFO [trainer.py:765] (3/8) Epoch 34, batch 2300, train_loss[loss=3.211, NarTop10Accuracy=0.6851, over 5852.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6445, over 6067.32 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (3/8) Epoch 34, batch 2400, train_loss[loss=3.553, NarTop10Accuracy=0.6025, over 5142.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6441, over 5873.55 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (3/8) Epoch 34, batch 2500, train_loss[loss=3.069, NarTop10Accuracy=0.7011, over 5030.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6483, over 5552.14 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,110 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 14:33:26,336 INFO [trainer.py:765] (3/8) Epoch 35, batch 100, train_loss[loss=3.33, NarTop10Accuracy=0.6618, over 7145.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6604, over 2379.96 frames. 
], batch size: 30, lr: 2.44e-03 +2024-08-06 14:34:03,581 INFO [trainer.py:765] (3/8) Epoch 35, batch 200, train_loss[loss=3.306, NarTop10Accuracy=0.6612, over 7006.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.659, over 3875.73 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,186 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (3/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,665 INFO [trainer.py:765] (3/8) Epoch 35, batch 300, train_loss[loss=3.411, NarTop10Accuracy=0.6333, over 7169.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6603, over 4667.40 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,543 INFO [trainer.py:765] (3/8) Epoch 35, batch 400, train_loss[loss=3.259, NarTop10Accuracy=0.6712, over 5219.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6585, over 5127.16 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,188 INFO [trainer.py:765] (3/8) Epoch 35, batch 500, train_loss[loss=3.545, NarTop10Accuracy=0.6188, over 6033.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6576, over 5409.39 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,747 INFO [trainer.py:765] (3/8) Epoch 35, batch 600, train_loss[loss=2.967, NarTop10Accuracy=0.7186, over 5860.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6548, over 5689.18 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,827 INFO [trainer.py:765] (3/8) Epoch 35, batch 700, train_loss[loss=3.273, NarTop10Accuracy=0.6606, over 5163.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6554, over 5743.15 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,769 INFO [trainer.py:765] (3/8) Epoch 35, batch 800, train_loss[loss=3.145, NarTop10Accuracy=0.6891, over 5020.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6548, over 5800.79 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,304 INFO [trainer.py:765] (3/8) Epoch 35, batch 900, train_loss[loss=3.081, NarTop10Accuracy=0.6952, over 6554.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6521, over 5827.79 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 14:38:43,709 INFO [trainer.py:765] (3/8) Epoch 35, batch 1000, train_loss[loss=3.569, NarTop10Accuracy=0.5957, over 6272.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.652, over 5929.61 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,567 INFO [trainer.py:765] (3/8) Epoch 35, batch 1100, train_loss[loss=3.561, NarTop10Accuracy=0.5965, over 6866.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6511, over 5944.22 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,838 INFO [trainer.py:765] (3/8) Epoch 35, batch 1200, train_loss[loss=3.202, NarTop10Accuracy=0.6728, over 7121.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6501, over 5939.10 frames. ], batch size: 30, lr: 2.43e-03 +2024-08-06 14:40:33,953 INFO [trainer.py:765] (3/8) Epoch 35, batch 1300, train_loss[loss=3.141, NarTop10Accuracy=0.6892, over 5131.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6512, over 6003.77 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,184 INFO [trainer.py:765] (3/8) Epoch 35, batch 1400, train_loss[loss=3.327, NarTop10Accuracy=0.6512, over 6271.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6476, over 6014.27 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,824 INFO [trainer.py:765] (3/8) Epoch 35, batch 1500, train_loss[loss=3.475, NarTop10Accuracy=0.625, over 6227.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6468, over 5950.09 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (3/8) Epoch 35, batch 1600, train_loss[loss=3.491, NarTop10Accuracy=0.6276, over 7073.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6463, over 5952.30 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,467 INFO [trainer.py:765] (3/8) Epoch 35, batch 1700, train_loss[loss=3.199, NarTop10Accuracy=0.6731, over 6348.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6476, over 5936.55 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (3/8) Epoch 35, batch 1800, train_loss[loss=3.176, NarTop10Accuracy=0.688, over 7172.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6466, over 6002.35 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,647 INFO [trainer.py:765] (3/8) Epoch 35, batch 1900, train_loss[loss=3.429, NarTop10Accuracy=0.6309, over 6302.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6454, over 6041.86 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:43:47,368 INFO [trainer.py:765] (3/8) Epoch 35, batch 2000, train_loss[loss=3.548, NarTop10Accuracy=0.6056, over 5548.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6456, over 6021.00 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 14:44:12,857 INFO [trainer.py:765] (3/8) Epoch 35, batch 2100, train_loss[loss=3.375, NarTop10Accuracy=0.6487, over 4006.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6466, over 6011.45 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,388 INFO [trainer.py:765] (3/8) Epoch 35, batch 2200, train_loss[loss=3.571, NarTop10Accuracy=0.6025, over 7297.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6456, over 6063.40 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 14:44:47,200 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (3/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,100 INFO [trainer.py:765] (3/8) Epoch 35, batch 2300, train_loss[loss=3.173, NarTop10Accuracy=0.6869, over 5777.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6447, over 6097.40 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,820 INFO [trainer.py:765] (3/8) Epoch 35, batch 2400, train_loss[loss=2.938, NarTop10Accuracy=0.7251, over 5024.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6429, over 5896.79 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 14:46:02,147 INFO [trainer.py:765] (3/8) Epoch 35, batch 2500, train_loss[loss=3.618, NarTop10Accuracy=0.5937, over 5049.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6486, over 5547.99 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,194 INFO [trainer.py:650] (3/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (3/8) Epoch 36, batch 100, train_loss[loss=3.333, NarTop10Accuracy=0.6531, over 7450.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6596, over 2377.22 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (3/8) Epoch 36, batch 200, train_loss[loss=3.292, NarTop10Accuracy=0.6512, over 6915.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6627, over 3867.01 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (3/8) Epoch 36, batch 300, train_loss[loss=3.15, NarTop10Accuracy=0.6859, over 7098.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6626, over 4686.05 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (3/8) Epoch 36, batch 400, train_loss[loss=3.121, NarTop10Accuracy=0.7002, over 5090.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6633, over 5138.24 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (3/8) Epoch 36, batch 500, train_loss[loss=3.595, NarTop10Accuracy=0.6035, over 6186.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.663, over 5408.20 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (3/8) Epoch 36, batch 600, train_loss[loss=3.023, NarTop10Accuracy=0.7204, over 5823.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6592, over 5679.43 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (3/8) Epoch 36, batch 700, train_loss[loss=3.337, NarTop10Accuracy=0.6596, over 5032.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6564, over 5752.58 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (3/8) Epoch 36, batch 800, train_loss[loss=3.495, NarTop10Accuracy=0.6185, over 5151.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6547, over 5808.28 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (3/8) Epoch 36, batch 900, train_loss[loss=3.195, NarTop10Accuracy=0.6781, over 6361.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.655, over 5817.60 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (3/8) Epoch 36, batch 1000, train_loss[loss=3.382, NarTop10Accuracy=0.6413, over 6176.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.655, over 5909.19 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (3/8) Epoch 36, batch 1100, train_loss[loss=3.22, NarTop10Accuracy=0.676, over 6427.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6507, over 5938.88 frames. ], batch size: 16, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (3/8) Epoch 36, batch 1200, train_loss[loss=3.227, NarTop10Accuracy=0.6775, over 7202.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6528, over 5946.84 frames. ], batch size: 30, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (3/8) Epoch 36, batch 1300, train_loss[loss=3.074, NarTop10Accuracy=0.7004, over 5188.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6518, over 6025.29 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,541 INFO [trainer.py:765] (3/8) Epoch 36, batch 1400, train_loss[loss=3.293, NarTop10Accuracy=0.6626, over 6174.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6524, over 6043.15 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (3/8) Epoch 36, batch 1500, train_loss[loss=3.523, NarTop10Accuracy=0.6125, over 6117.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6493, over 5978.65 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (3/8) Epoch 36, batch 1600, train_loss[loss=3.153, NarTop10Accuracy=0.6955, over 7128.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6483, over 5952.47 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,132 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (3/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,178 INFO [trainer.py:765] (3/8) Epoch 36, batch 1700, train_loss[loss=2.962, NarTop10Accuracy=0.7179, over 6188.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6495, over 5950.13 frames. ], batch size: 13, lr: 2.35e-03 +2024-08-06 14:56:53,759 INFO [trainer.py:765] (3/8) Epoch 36, batch 1800, train_loss[loss=3.308, NarTop10Accuracy=0.6446, over 7159.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6479, over 6020.12 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,336 INFO [trainer.py:765] (3/8) Epoch 36, batch 1900, train_loss[loss=3.527, NarTop10Accuracy=0.6153, over 6448.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6471, over 6065.81 frames. ], batch size: 50, lr: 2.35e-03 +2024-08-06 14:57:46,057 INFO [trainer.py:765] (3/8) Epoch 36, batch 2000, train_loss[loss=3.716, NarTop10Accuracy=0.578, over 5587.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6461, over 6027.36 frames. ], batch size: 48, lr: 2.35e-03 +2024-08-06 14:58:11,405 INFO [trainer.py:765] (3/8) Epoch 36, batch 2100, train_loss[loss=2.972, NarTop10Accuracy=0.716, over 4858.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6469, over 5998.04 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 14:58:36,833 INFO [trainer.py:765] (3/8) Epoch 36, batch 2200, train_loss[loss=3.599, NarTop10Accuracy=0.6035, over 7365.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6464, over 6021.54 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (3/8) Epoch 36, batch 2300, train_loss[loss=3.091, NarTop10Accuracy=0.6931, over 5730.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6445, over 6064.09 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (3/8) Epoch 36, batch 2400, train_loss[loss=3.35, NarTop10Accuracy=0.6414, over 5164.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.645, over 5886.13 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (3/8) Epoch 36, batch 2500, train_loss[loss=3.37, NarTop10Accuracy=0.6559, over 5110.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6484, over 5566.33 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,410 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (3/8) Epoch 37, batch 100, train_loss[loss=3.379, NarTop10Accuracy=0.6493, over 7226.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6642, over 2366.32 frames. 
], batch size: 30, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (3/8) Epoch 37, batch 200, train_loss[loss=3.018, NarTop10Accuracy=0.7102, over 6706.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6636, over 3858.39 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,383 INFO [trainer.py:765] (3/8) Epoch 37, batch 300, train_loss[loss=3.155, NarTop10Accuracy=0.6986, over 7019.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6615, over 4692.23 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (3/8) Epoch 37, batch 400, train_loss[loss=3.381, NarTop10Accuracy=0.6427, over 5264.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6599, over 5152.43 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (3/8) Epoch 37, batch 500, train_loss[loss=3.166, NarTop10Accuracy=0.6792, over 5992.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6591, over 5410.08 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (3/8) Epoch 37, batch 600, train_loss[loss=3.092, NarTop10Accuracy=0.7017, over 5713.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6586, over 5680.68 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,247 INFO [trainer.py:765] (3/8) Epoch 37, batch 700, train_loss[loss=3.074, NarTop10Accuracy=0.6926, over 5107.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.657, over 5761.02 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (3/8) Epoch 37, batch 800, train_loss[loss=3.181, NarTop10Accuracy=0.669, over 5027.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6549, over 5786.08 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (3/8) Epoch 37, batch 900, train_loss[loss=3.122, NarTop10Accuracy=0.6875, over 6202.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6551, over 5814.83 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (3/8) Epoch 37, batch 1000, train_loss[loss=3.131, NarTop10Accuracy=0.6815, over 6682.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.653, over 5916.04 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (3/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (3/8) Epoch 37, batch 1100, train_loss[loss=3.383, NarTop10Accuracy=0.644, over 6443.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.652, over 5948.71 frames. ], batch size: 16, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (3/8) Epoch 37, batch 1200, train_loss[loss=3.25, NarTop10Accuracy=0.676, over 7150.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.652, over 5947.70 frames. ], batch size: 30, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (3/8) Epoch 37, batch 1300, train_loss[loss=3.351, NarTop10Accuracy=0.6494, over 4995.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6511, over 6019.22 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (3/8) Epoch 37, batch 1400, train_loss[loss=3.112, NarTop10Accuracy=0.6957, over 6189.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.649, over 6056.71 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (3/8) Epoch 37, batch 1500, train_loss[loss=3.637, NarTop10Accuracy=0.5921, over 6251.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 6000.23 frames. ], batch size: 48, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (3/8) Epoch 37, batch 1600, train_loss[loss=3.426, NarTop10Accuracy=0.6265, over 7251.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6487, over 5981.06 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (3/8) Epoch 37, batch 1700, train_loss[loss=3.138, NarTop10Accuracy=0.6837, over 6242.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6492, over 5947.09 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (3/8) Epoch 37, batch 1800, train_loss[loss=3.426, NarTop10Accuracy=0.6418, over 7088.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 6014.43 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (3/8) Epoch 37, batch 1900, train_loss[loss=3.503, NarTop10Accuracy=0.622, over 6097.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6465, over 6041.93 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,940 INFO [trainer.py:765] (3/8) Epoch 37, batch 2000, train_loss[loss=3.641, NarTop10Accuracy=0.5871, over 5625.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6461, over 6012.54 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (3/8) Epoch 37, batch 2100, train_loss[loss=2.766, NarTop10Accuracy=0.7493, over 3924.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 5993.26 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,311 INFO [trainer.py:765] (3/8) Epoch 37, batch 2200, train_loss[loss=3.519, NarTop10Accuracy=0.6156, over 7047.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6488, over 6055.09 frames. ], batch size: 30, lr: 2.28e-03 +2024-08-06 15:12:49,787 INFO [trainer.py:765] (3/8) Epoch 37, batch 2300, train_loss[loss=3.35, NarTop10Accuracy=0.6491, over 5619.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 6074.05 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (3/8) Epoch 37, batch 2400, train_loss[loss=3.135, NarTop10Accuracy=0.6944, over 5096.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6473, over 5886.95 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (3/8) Epoch 37, batch 2500, train_loss[loss=3.361, NarTop10Accuracy=0.6379, over 5089.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6507, over 5556.23 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,211 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:14:50,845 INFO [trainer.py:765] (3/8) Epoch 38, batch 100, train_loss[loss=3.555, NarTop10Accuracy=0.6066, over 7197.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6522, over 2374.82 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 15:15:27,288 INFO [trainer.py:765] (3/8) Epoch 38, batch 200, train_loss[loss=3.238, NarTop10Accuracy=0.6717, over 6678.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6598, over 3873.57 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,279 INFO [trainer.py:765] (3/8) Epoch 38, batch 300, train_loss[loss=3.349, NarTop10Accuracy=0.6442, over 7093.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6643, over 4689.24 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,594 INFO [trainer.py:765] (3/8) Epoch 38, batch 400, train_loss[loss=2.981, NarTop10Accuracy=0.7107, over 5187.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6645, over 5131.77 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,256 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (3/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,479 INFO [trainer.py:765] (3/8) Epoch 38, batch 500, train_loss[loss=3.341, NarTop10Accuracy=0.6532, over 6206.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6629, over 5398.11 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (3/8) Epoch 38, batch 600, train_loss[loss=3.152, NarTop10Accuracy=0.6977, over 5779.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6617, over 5660.06 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (3/8) Epoch 38, batch 700, train_loss[loss=3.057, NarTop10Accuracy=0.6858, over 4991.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6588, over 5728.84 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (3/8) Epoch 38, batch 800, train_loss[loss=3.075, NarTop10Accuracy=0.7046, over 5179.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6566, over 5797.72 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,539 INFO [trainer.py:765] (3/8) Epoch 38, batch 900, train_loss[loss=3.334, NarTop10Accuracy=0.6455, over 6654.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6551, over 5835.46 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (3/8) Epoch 38, batch 1000, train_loss[loss=3.389, NarTop10Accuracy=0.6501, over 6303.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6548, over 5933.02 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (3/8) Epoch 38, batch 1100, train_loss[loss=3.415, NarTop10Accuracy=0.6417, over 6932.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6526, over 5970.41 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (3/8) Epoch 38, batch 1200, train_loss[loss=3.434, NarTop10Accuracy=0.6386, over 7386.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.651, over 5953.54 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (3/8) Epoch 38, batch 1300, train_loss[loss=3.054, NarTop10Accuracy=0.7019, over 4893.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6531, over 6018.05 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (3/8) Epoch 38, batch 1400, train_loss[loss=3.24, NarTop10Accuracy=0.6676, over 6214.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6509, over 6038.92 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (3/8) Epoch 38, batch 1500, train_loss[loss=3.464, NarTop10Accuracy=0.622, over 5995.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6484, over 5962.31 frames. ], batch size: 48, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (3/8) Epoch 38, batch 1600, train_loss[loss=3.48, NarTop10Accuracy=0.6311, over 7043.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6476, over 5936.53 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (3/8) Epoch 38, batch 1700, train_loss[loss=3.304, NarTop10Accuracy=0.6605, over 6179.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6475, over 5911.58 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,064 INFO [trainer.py:765] (3/8) Epoch 38, batch 1800, train_loss[loss=3.385, NarTop10Accuracy=0.6415, over 7093.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6491, over 5992.23 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,673 INFO [trainer.py:765] (3/8) Epoch 38, batch 1900, train_loss[loss=3.297, NarTop10Accuracy=0.6641, over 6163.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 6027.14 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (3/8) Epoch 38, batch 2000, train_loss[loss=3.49, NarTop10Accuracy=0.6282, over 5843.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6476, over 5999.68 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (3/8) Epoch 38, batch 2100, train_loss[loss=3.268, NarTop10Accuracy=0.6679, over 4883.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6486, over 5979.17 frames. ], batch size: 5, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (3/8) Epoch 38, batch 2200, train_loss[loss=3.634, NarTop10Accuracy=0.5837, over 6940.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6484, over 6032.89 frames. ], batch size: 30, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (3/8) Epoch 38, batch 2300, train_loss[loss=3.418, NarTop10Accuracy=0.6531, over 5746.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6463, over 6065.49 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,480 INFO [trainer.py:765] (3/8) Epoch 38, batch 2400, train_loss[loss=3.226, NarTop10Accuracy=0.6704, over 5836.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6465, over 5871.75 frames. ], batch size: 8, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:27:33,589 INFO [trainer.py:811] (3/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 15:27:34,075 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,514 INFO [trainer.py:765] (3/8) Epoch 38, batch 2500, train_loss[loss=3.411, NarTop10Accuracy=0.6344, over 5075.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6528, over 5533.38 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,651 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (3/8) Epoch 39, batch 100, train_loss[loss=3.174, NarTop10Accuracy=0.6756, over 7324.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6644, over 2390.61 frames. 
], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (3/8) Epoch 39, batch 200, train_loss[loss=3.505, NarTop10Accuracy=0.6181, over 6788.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6642, over 3882.76 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (3/8) Epoch 39, batch 300, train_loss[loss=3.228, NarTop10Accuracy=0.6794, over 7214.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6621, over 4694.37 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (3/8) Epoch 39, batch 400, train_loss[loss=3.098, NarTop10Accuracy=0.7107, over 5238.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6598, over 5112.96 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (3/8) Epoch 39, batch 500, train_loss[loss=2.935, NarTop10Accuracy=0.7388, over 6104.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6601, over 5399.08 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (3/8) Epoch 39, batch 600, train_loss[loss=3.358, NarTop10Accuracy=0.648, over 5754.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6598, over 5670.64 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (3/8) Epoch 39, batch 700, train_loss[loss=2.933, NarTop10Accuracy=0.7313, over 5061.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6592, over 5723.71 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,166 INFO [trainer.py:765] (3/8) Epoch 39, batch 800, train_loss[loss=3.137, NarTop10Accuracy=0.6871, over 5044.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6594, over 5802.91 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (3/8) Epoch 39, batch 900, train_loss[loss=3.133, NarTop10Accuracy=0.6908, over 6397.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6576, over 5827.77 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:02,656 INFO [trainer.py:765] (3/8) Epoch 39, batch 1000, train_loss[loss=3.05, NarTop10Accuracy=0.7112, over 6274.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6566, over 5926.23 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:33,094 INFO [trainer.py:765] (3/8) Epoch 39, batch 1100, train_loss[loss=3.029, NarTop10Accuracy=0.7107, over 6691.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.655, over 5961.42 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (3/8) Epoch 39, batch 1200, train_loss[loss=3.22, NarTop10Accuracy=0.6819, over 7242.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6539, over 5959.40 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (3/8) Epoch 39, batch 1300, train_loss[loss=3.451, NarTop10Accuracy=0.6315, over 5003.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6544, over 6025.66 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (3/8) Epoch 39, batch 1400, train_loss[loss=3.292, NarTop10Accuracy=0.6659, over 6216.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.655, over 6018.91 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (3/8) Epoch 39, batch 1500, train_loss[loss=3.319, NarTop10Accuracy=0.6539, over 5757.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6517, over 5971.90 frames. 
], batch size: 49, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (3/8) Epoch 39, batch 1600, train_loss[loss=3.249, NarTop10Accuracy=0.6687, over 7398.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6505, over 5953.06 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (3/8) Epoch 39, batch 1700, train_loss[loss=2.997, NarTop10Accuracy=0.7293, over 6156.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6508, over 5944.32 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 15:38:08,509 INFO [trainer.py:765] (3/8) Epoch 39, batch 1800, train_loss[loss=3.303, NarTop10Accuracy=0.6539, over 7341.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6529, over 6008.92 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (3/8) Epoch 39, batch 1900, train_loss[loss=3.399, NarTop10Accuracy=0.6386, over 6629.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.649, over 6049.09 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:38:37,991 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (3/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,263 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,227 INFO [trainer.py:765] (3/8) Epoch 39, batch 2000, train_loss[loss=3.413, NarTop10Accuracy=0.6388, over 6166.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6507, over 6026.47 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (3/8) Epoch 39, batch 2100, train_loss[loss=3.747, NarTop10Accuracy=0.5658, over 4787.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6495, over 5996.90 frames. ], batch size: 5, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (3/8) Epoch 39, batch 2200, train_loss[loss=3.427, NarTop10Accuracy=0.6357, over 7388.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6502, over 6042.14 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 15:40:27,497 INFO [trainer.py:765] (3/8) Epoch 39, batch 2300, train_loss[loss=3.3, NarTop10Accuracy=0.6515, over 5718.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6059.80 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (3/8) Epoch 39, batch 2400, train_loss[loss=3.298, NarTop10Accuracy=0.6448, over 5086.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6452, over 5876.30 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (3/8) Epoch 39, batch 2500, train_loss[loss=3.667, NarTop10Accuracy=0.5972, over 5141.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6522, over 5543.05 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:36,850 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (3/8) Epoch 40, batch 100, train_loss[loss=3.655, NarTop10Accuracy=0.5918, over 7153.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6542, over 2373.67 frames. ], batch size: 30, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (3/8) Epoch 40, batch 200, train_loss[loss=3.46, NarTop10Accuracy=0.6239, over 6923.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6614, over 3863.57 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (3/8) Epoch 40, batch 300, train_loss[loss=3.338, NarTop10Accuracy=0.6464, over 7212.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6621, over 4662.72 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:44:18,202 INFO [trainer.py:765] (3/8) Epoch 40, batch 400, train_loss[loss=3.082, NarTop10Accuracy=0.7023, over 5094.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6634, over 5127.30 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,258 INFO [trainer.py:765] (3/8) Epoch 40, batch 500, train_loss[loss=3.027, NarTop10Accuracy=0.7025, over 6146.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6638, over 5410.45 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (3/8) Epoch 40, batch 600, train_loss[loss=3.414, NarTop10Accuracy=0.627, over 5761.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6611, over 5687.73 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (3/8) Epoch 40, batch 700, train_loss[loss=3.374, NarTop10Accuracy=0.6321, over 5101.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6595, over 5749.29 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (3/8) Epoch 40, batch 800, train_loss[loss=3.32, NarTop10Accuracy=0.661, over 4216.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6572, over 5794.16 frames. ], batch size: 5, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (3/8) Epoch 40, batch 900, train_loss[loss=3.101, NarTop10Accuracy=0.7017, over 6309.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6567, over 5810.97 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (3/8) Epoch 40, batch 1000, train_loss[loss=3.646, NarTop10Accuracy=0.5909, over 6242.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6548, over 5915.59 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,710 INFO [trainer.py:765] (3/8) Epoch 40, batch 1100, train_loss[loss=3.366, NarTop10Accuracy=0.6415, over 7059.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6549, over 5966.63 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (3/8) Epoch 40, batch 1200, train_loss[loss=3.416, NarTop10Accuracy=0.6357, over 7153.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6551, over 5957.65 frames. ], batch size: 30, lr: 2.12e-03 +2024-08-06 15:49:29,783 INFO [trainer.py:765] (3/8) Epoch 40, batch 1300, train_loss[loss=3.241, NarTop10Accuracy=0.6589, over 5111.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6549, over 6013.07 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,246 INFO [trainer.py:803] (3/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (3/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (3/8) Maximum memory allocated so far is 30594MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (3/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (3/8) Epoch 40, batch 1400, train_loss[loss=3.311, NarTop10Accuracy=0.6662, over 6145.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6524, over 6028.53 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (3/8) Epoch 40, batch 1500, train_loss[loss=3.479, NarTop10Accuracy=0.6126, over 6285.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6528, over 5977.43 frames. ], batch size: 49, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (3/8) Epoch 40, batch 1600, train_loss[loss=3.145, NarTop10Accuracy=0.6877, over 6911.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6549, over 5946.38 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (3/8) Epoch 40, batch 1700, train_loss[loss=3.246, NarTop10Accuracy=0.6631, over 6666.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6553, over 5937.31 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (3/8) Epoch 40, batch 1800, train_loss[loss=3.495, NarTop10Accuracy=0.625, over 7269.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6533, over 6014.19 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (3/8) Epoch 40, batch 1900, train_loss[loss=3.492, NarTop10Accuracy=0.6221, over 6446.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6518, over 6061.16 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (3/8) Epoch 40, batch 2000, train_loss[loss=3.354, NarTop10Accuracy=0.6497, over 6499.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6509, over 6028.55 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (3/8) Epoch 40, batch 2100, train_loss[loss=3.331, NarTop10Accuracy=0.6517, over 3938.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6518, over 5996.13 frames. ], batch size: 4, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (3/8) Epoch 40, batch 2200, train_loss[loss=3.36, NarTop10Accuracy=0.6451, over 7331.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6515, over 6031.54 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (3/8) Epoch 40, batch 2300, train_loss[loss=3.448, NarTop10Accuracy=0.6239, over 5611.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6515, over 6060.62 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (3/8) Epoch 40, batch 2400, train_loss[loss=3.575, NarTop10Accuracy=0.5983, over 5130.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6504, over 5856.46 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (3/8) Epoch 40, batch 2500, train_loss[loss=3.178, NarTop10Accuracy=0.679, over 5021.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6556, over 5518.31 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,788 INFO [trainer.py:650] (3/8) Reaches end of dataloader. +2024-08-06 15:55:28,790 INFO [trainer.py:1069] (3/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-4 b/libritts/log/log-train-2024-08-06-06-41-41-4 new file mode 100644 index 0000000000000000000000000000000000000000..6dc70f5d818cb0681d04140026c45ffe3cd66205 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-4 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,464 INFO [trainer.py:870] (4/8) Training started +2024-08-06 06:41:41,465 INFO [trainer.py:889] (4/8) Device: cuda:4 +2024-08-06 06:41:41,465 INFO [trainer.py:890] (4/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,466 INFO [trainer.py:892] (4/8) About to create model +2024-08-06 06:41:42,221 INFO [trainer.py:899] (4/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,222 INFO [checkpoint.py:112] (4/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,339 INFO [trainer.py:914] (4/8) Using DDP +2024-08-06 06:41:46,902 INFO [datamodule.py:427] (4/8) About to get train cuts +2024-08-06 06:41:46,904 INFO [datamodule.py:434] (4/8) About to get dev cuts +2024-08-06 06:41:46,905 INFO [datamodule.py:292] (4/8) Disable SpecAugment +2024-08-06 06:41:46,905 INFO [datamodule.py:294] (4/8) About to create train dataset +2024-08-06 06:41:46,906 INFO [datamodule.py:323] (4/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,520 INFO [datamodule.py:344] (4/8) About to create train dataloader +2024-08-06 06:41:47,520 INFO [datamodule.py:367] (4/8) 
About to create dev dataset +2024-08-06 06:41:47,852 INFO [datamodule.py:388] (4/8) About to create dev dataloader +2024-08-06 06:42:36,136 INFO [trainer.py:765] (4/8) Epoch 1, batch 100, train_loss[loss=95.66, NarTop10Accuracy=0.01274, over 7128.00 frames. ], tot_loss[loss=80.57, NarTop10Accuracy=0.05303, over 2372.61 frames. ], batch size: 30, lr: 2.25e-02 +2024-08-06 06:43:05,819 INFO [trainer.py:765] (4/8) Epoch 1, batch 200, train_loss[loss=119.5, NarTop10Accuracy=0.02287, over 6819.00 frames. ], tot_loss[loss=99.03, NarTop10Accuracy=0.04602, over 3877.45 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,849 INFO [trainer.py:765] (4/8) Epoch 1, batch 300, train_loss[loss=72.51, NarTop10Accuracy=0.02378, over 7384.00 frames. ], tot_loss[loss=86.76, NarTop10Accuracy=0.04662, over 4686.58 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,252 INFO [trainer.py:765] (4/8) Epoch 1, batch 400, train_loss[loss=33.56, NarTop10Accuracy=0.06908, over 5252.00 frames. ], tot_loss[loss=67.76, NarTop10Accuracy=0.05074, over 5133.42 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,445 INFO [trainer.py:765] (4/8) Epoch 1, batch 500, train_loss[loss=16.72, NarTop10Accuracy=0.02486, over 6149.00 frames. ], tot_loss[loss=48.48, NarTop10Accuracy=0.05642, over 5427.01 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,925 INFO [trainer.py:765] (4/8) Epoch 1, batch 600, train_loss[loss=6, NarTop10Accuracy=0.1917, over 5774.00 frames. ], tot_loss[loss=33.27, NarTop10Accuracy=0.06358, over 5683.67 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,482 INFO [trainer.py:765] (4/8) Epoch 1, batch 700, train_loss[loss=7.188, NarTop10Accuracy=0.08092, over 5045.00 frames. ], tot_loss[loss=23.52, NarTop10Accuracy=0.07065, over 5739.87 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,664 INFO [trainer.py:765] (4/8) Epoch 1, batch 800, train_loss[loss=6.584, NarTop10Accuracy=0.0977, over 5086.00 frames. ], tot_loss[loss=17.5, NarTop10Accuracy=0.08179, over 5799.71 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,734 INFO [trainer.py:765] (4/8) Epoch 1, batch 900, train_loss[loss=5.926, NarTop10Accuracy=0.1968, over 6558.00 frames. ], tot_loss[loss=13.01, NarTop10Accuracy=0.1119, over 5819.00 frames. ], batch size: 14, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [trainer.py:765] (4/8) Epoch 1, batch 1000, train_loss[loss=6.027, NarTop10Accuracy=0.1532, over 6156.00 frames. ], tot_loss[loss=10.15, NarTop10Accuracy=0.1376, over 5933.48 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,141 INFO [trainer.py:765] (4/8) Epoch 1, batch 1100, train_loss[loss=5.562, NarTop10Accuracy=0.1876, over 6748.00 frames. ], tot_loss[loss=8.407, NarTop10Accuracy=0.1577, over 5958.30 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (4/8) Epoch 1, batch 1200, train_loss[loss=6.203, NarTop10Accuracy=0.1468, over 7076.00 frames. ], tot_loss[loss=7.305, NarTop10Accuracy=0.1741, over 5951.90 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 06:48:47,235 INFO [trainer.py:765] (4/8) Epoch 1, batch 1300, train_loss[loss=5.655, NarTop10Accuracy=0.195, over 5011.00 frames. ], tot_loss[loss=6.612, NarTop10Accuracy=0.1855, over 6016.03 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,567 INFO [trainer.py:765] (4/8) Epoch 1, batch 1400, train_loss[loss=5.503, NarTop10Accuracy=0.1999, over 6101.00 frames. ], tot_loss[loss=6.189, NarTop10Accuracy=0.1926, over 6030.39 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,507 INFO [trainer.py:765] (4/8) Epoch 1, batch 1500, train_loss[loss=5.814, NarTop10Accuracy=0.1533, over 5862.00 frames. ], tot_loss[loss=5.924, NarTop10Accuracy=0.1985, over 5986.99 frames. ], batch size: 49, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (4/8) Epoch 1, batch 1600, train_loss[loss=5.533, NarTop10Accuracy=0.2098, over 7236.00 frames. ], tot_loss[loss=5.76, NarTop10Accuracy=0.2029, over 5971.43 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,596 INFO [trainer.py:765] (4/8) Epoch 1, batch 1700, train_loss[loss=5.468, NarTop10Accuracy=0.2176, over 6624.00 frames. ], tot_loss[loss=5.643, NarTop10Accuracy=0.208, over 5948.28 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 06:51:11,955 INFO [trainer.py:765] (4/8) Epoch 1, batch 1800, train_loss[loss=5.555, NarTop10Accuracy=0.1868, over 7027.00 frames. ], tot_loss[loss=5.554, NarTop10Accuracy=0.215, over 6013.77 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,223 INFO [trainer.py:765] (4/8) Epoch 1, batch 1900, train_loss[loss=5.389, NarTop10Accuracy=0.2221, over 5750.00 frames. ], tot_loss[loss=5.497, NarTop10Accuracy=0.2205, over 6042.89 frames. ], batch size: 49, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (4/8) Epoch 1, batch 2000, train_loss[loss=5.373, NarTop10Accuracy=0.2426, over 5947.00 frames. ], tot_loss[loss=5.449, NarTop10Accuracy=0.2268, over 6013.92 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (4/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,586 INFO [trainer.py:765] (4/8) Epoch 1, batch 2100, train_loss[loss=5.419, NarTop10Accuracy=0.2155, over 3930.00 frames. ], tot_loss[loss=5.394, NarTop10Accuracy=0.2358, over 6001.11 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 06:53:05,354 INFO [trainer.py:765] (4/8) Epoch 1, batch 2200, train_loss[loss=5.287, NarTop10Accuracy=0.2443, over 7063.00 frames. ], tot_loss[loss=5.36, NarTop10Accuracy=0.2404, over 6037.58 frames. ], batch size: 30, lr: 2.87e-02 +2024-08-06 06:53:30,702 INFO [trainer.py:765] (4/8) Epoch 1, batch 2300, train_loss[loss=5.243, NarTop10Accuracy=0.2662, over 5756.00 frames. ], tot_loss[loss=5.333, NarTop10Accuracy=0.2452, over 6054.84 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,359 INFO [trainer.py:765] (4/8) Epoch 1, batch 2400, train_loss[loss=5.275, NarTop10Accuracy=0.2553, over 5281.00 frames. ], tot_loss[loss=5.302, NarTop10Accuracy=0.2517, over 5862.78 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (4/8) Epoch 1, batch 2500, train_loss[loss=5.114, NarTop10Accuracy=0.2912, over 4295.00 frames. ], tot_loss[loss=5.265, NarTop10Accuracy=0.2583, over 5525.41 frames. ], batch size: 5, lr: 2.84e-02 +2024-08-06 06:54:40,471 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 06:55:37,937 INFO [trainer.py:765] (4/8) Epoch 2, batch 100, train_loss[loss=5.251, NarTop10Accuracy=0.2713, over 7150.00 frames. ], tot_loss[loss=5.185, NarTop10Accuracy=0.2792, over 2374.58 frames. 
], batch size: 30, lr: 2.77e-02 +2024-08-06 06:56:16,405 INFO [trainer.py:765] (4/8) Epoch 2, batch 200, train_loss[loss=4.932, NarTop10Accuracy=0.3317, over 6877.00 frames. ], tot_loss[loss=5.157, NarTop10Accuracy=0.2828, over 3880.29 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,973 INFO [trainer.py:765] (4/8) Epoch 2, batch 300, train_loss[loss=5.255, NarTop10Accuracy=0.2704, over 7002.00 frames. ], tot_loss[loss=5.138, NarTop10Accuracy=0.2868, over 4676.86 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,939 INFO [trainer.py:765] (4/8) Epoch 2, batch 400, train_loss[loss=5.33, NarTop10Accuracy=0.2334, over 5097.00 frames. ], tot_loss[loss=5.123, NarTop10Accuracy=0.2896, over 5133.65 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (4/8) Epoch 2, batch 500, train_loss[loss=4.872, NarTop10Accuracy=0.3342, over 6125.00 frames. ], tot_loss[loss=5.098, NarTop10Accuracy=0.2944, over 5406.51 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,426 INFO [trainer.py:765] (4/8) Epoch 2, batch 600, train_loss[loss=5.095, NarTop10Accuracy=0.3111, over 5769.00 frames. ], tot_loss[loss=5.096, NarTop10Accuracy=0.2959, over 5674.35 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (4/8) Epoch 2, batch 700, train_loss[loss=4.792, NarTop10Accuracy=0.3329, over 5186.00 frames. ], tot_loss[loss=5.083, NarTop10Accuracy=0.2987, over 5731.53 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (4/8) Epoch 2, batch 800, train_loss[loss=4.944, NarTop10Accuracy=0.3263, over 5119.00 frames. ], tot_loss[loss=5.086, NarTop10Accuracy=0.2977, over 5790.73 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,183 INFO [trainer.py:765] (4/8) Epoch 2, batch 900, train_loss[loss=5.532, NarTop10Accuracy=0.203, over 6122.00 frames. ], tot_loss[loss=5.043, NarTop10Accuracy=0.3061, over 5815.05 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,143 INFO [trainer.py:765] (4/8) Epoch 2, batch 1000, train_loss[loss=4.763, NarTop10Accuracy=0.3628, over 6649.00 frames. ], tot_loss[loss=5.009, NarTop10Accuracy=0.3124, over 5909.23 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 07:01:05,573 INFO [trainer.py:765] (4/8) Epoch 2, batch 1100, train_loss[loss=5.204, NarTop10Accuracy=0.2913, over 6873.00 frames. ], tot_loss[loss=5.01, NarTop10Accuracy=0.3124, over 5945.08 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (4/8) Epoch 2, batch 1200, train_loss[loss=4.743, NarTop10Accuracy=0.3674, over 7277.00 frames. ], tot_loss[loss=5.003, NarTop10Accuracy=0.3131, over 5950.35 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,645 INFO [trainer.py:765] (4/8) Epoch 2, batch 1300, train_loss[loss=5.142, NarTop10Accuracy=0.2777, over 5041.00 frames. ], tot_loss[loss=4.954, NarTop10Accuracy=0.3225, over 6007.43 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,253 INFO [trainer.py:765] (4/8) Epoch 2, batch 1400, train_loss[loss=4.571, NarTop10Accuracy=0.3976, over 6085.00 frames. ], tot_loss[loss=4.951, NarTop10Accuracy=0.3234, over 6022.56 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,267 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (4/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (4/8) Epoch 2, batch 1500, train_loss[loss=5.111, NarTop10Accuracy=0.2995, over 5724.00 frames. ], tot_loss[loss=4.935, NarTop10Accuracy=0.326, over 5964.40 frames. ], batch size: 48, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (4/8) Epoch 2, batch 1600, train_loss[loss=4.773, NarTop10Accuracy=0.3518, over 7265.00 frames. ], tot_loss[loss=4.916, NarTop10Accuracy=0.3301, over 5949.41 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,314 INFO [trainer.py:765] (4/8) Epoch 2, batch 1700, train_loss[loss=4.691, NarTop10Accuracy=0.3642, over 6181.00 frames. ], tot_loss[loss=4.913, NarTop10Accuracy=0.3313, over 5925.57 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (4/8) Epoch 2, batch 1800, train_loss[loss=4.828, NarTop10Accuracy=0.352, over 7085.00 frames. ], tot_loss[loss=4.89, NarTop10Accuracy=0.3356, over 6000.46 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,587 INFO [trainer.py:765] (4/8) Epoch 2, batch 1900, train_loss[loss=4.877, NarTop10Accuracy=0.3356, over 5851.00 frames. ], tot_loss[loss=4.871, NarTop10Accuracy=0.3391, over 6043.53 frames. ], batch size: 48, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (4/8) Epoch 2, batch 2000, train_loss[loss=4.856, NarTop10Accuracy=0.3459, over 6375.00 frames. ], tot_loss[loss=4.853, NarTop10Accuracy=0.3425, over 6007.43 frames. ], batch size: 48, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (4/8) Epoch 2, batch 2100, train_loss[loss=4.659, NarTop10Accuracy=0.3706, over 3844.00 frames. ], tot_loss[loss=4.854, NarTop10Accuracy=0.3427, over 5989.96 frames. ], batch size: 4, lr: 2.52e-02 +2024-08-06 07:06:30,373 INFO [trainer.py:765] (4/8) Epoch 2, batch 2200, train_loss[loss=4.646, NarTop10Accuracy=0.3736, over 7473.00 frames. ], tot_loss[loss=4.814, NarTop10Accuracy=0.3511, over 6036.77 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (4/8) Epoch 2, batch 2300, train_loss[loss=4.573, NarTop10Accuracy=0.4063, over 5722.00 frames. ], tot_loss[loss=4.81, NarTop10Accuracy=0.3522, over 6072.32 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (4/8) Epoch 2, batch 2400, train_loss[loss=4.541, NarTop10Accuracy=0.4131, over 5078.00 frames. ], tot_loss[loss=4.79, NarTop10Accuracy=0.3564, over 5867.01 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (4/8) Epoch 2, batch 2500, train_loss[loss=4.36, NarTop10Accuracy=0.4385, over 5206.00 frames. ], tot_loss[loss=4.755, NarTop10Accuracy=0.3629, over 5519.84 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:08,224 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 07:09:08,538 INFO [trainer.py:765] (4/8) Epoch 3, batch 100, train_loss[loss=4.901, NarTop10Accuracy=0.3468, over 7252.00 frames. ], tot_loss[loss=4.677, NarTop10Accuracy=0.381, over 2373.05 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,500 INFO [trainer.py:765] (4/8) Epoch 3, batch 200, train_loss[loss=4.551, NarTop10Accuracy=0.398, over 6840.00 frames. ], tot_loss[loss=4.638, NarTop10Accuracy=0.3882, over 3865.21 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (4/8) Epoch 3, batch 300, train_loss[loss=4.427, NarTop10Accuracy=0.427, over 7263.00 frames. ], tot_loss[loss=4.612, NarTop10Accuracy=0.3919, over 4689.01 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (4/8) Epoch 3, batch 400, train_loss[loss=4.521, NarTop10Accuracy=0.4108, over 5046.00 frames. ], tot_loss[loss=4.582, NarTop10Accuracy=0.3981, over 5135.14 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (4/8) Epoch 3, batch 500, train_loss[loss=4.821, NarTop10Accuracy=0.3537, over 6074.00 frames. ], tot_loss[loss=4.582, NarTop10Accuracy=0.3982, over 5411.08 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (4/8) Epoch 3, batch 600, train_loss[loss=4.528, NarTop10Accuracy=0.4068, over 5826.00 frames. ], tot_loss[loss=4.565, NarTop10Accuracy=0.4012, over 5664.06 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (4/8) Epoch 3, batch 700, train_loss[loss=4.83, NarTop10Accuracy=0.3549, over 5117.00 frames. ], tot_loss[loss=4.553, NarTop10Accuracy=0.403, over 5750.61 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (4/8) Epoch 3, batch 800, train_loss[loss=4.314, NarTop10Accuracy=0.4579, over 5056.00 frames. ], tot_loss[loss=4.547, NarTop10Accuracy=0.4041, over 5806.98 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,669 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (4/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (4/8) Epoch 3, batch 900, train_loss[loss=4.363, NarTop10Accuracy=0.4426, over 6694.00 frames. ], tot_loss[loss=4.518, NarTop10Accuracy=0.4104, over 5833.06 frames. ], batch size: 14, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (4/8) Epoch 3, batch 1000, train_loss[loss=4.105, NarTop10Accuracy=0.4731, over 6148.00 frames. ], tot_loss[loss=4.509, NarTop10Accuracy=0.4119, over 5932.46 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 07:14:56,324 INFO [trainer.py:765] (4/8) Epoch 3, batch 1100, train_loss[loss=4.413, NarTop10Accuracy=0.4312, over 6735.00 frames. ], tot_loss[loss=4.501, NarTop10Accuracy=0.4133, over 5959.80 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,866 INFO [trainer.py:765] (4/8) Epoch 3, batch 1200, train_loss[loss=4.352, NarTop10Accuracy=0.4455, over 7279.00 frames. ], tot_loss[loss=4.489, NarTop10Accuracy=0.4155, over 5946.81 frames. ], batch size: 31, lr: 2.23e-02 +2024-08-06 07:16:12,664 INFO [trainer.py:765] (4/8) Epoch 3, batch 1300, train_loss[loss=4.458, NarTop10Accuracy=0.4203, over 5140.00 frames. ], tot_loss[loss=4.467, NarTop10Accuracy=0.4195, over 6010.42 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (4/8) Epoch 3, batch 1400, train_loss[loss=4.324, NarTop10Accuracy=0.4302, over 6280.00 frames. ], tot_loss[loss=4.457, NarTop10Accuracy=0.4211, over 6031.60 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,663 INFO [trainer.py:765] (4/8) Epoch 3, batch 1500, train_loss[loss=4.765, NarTop10Accuracy=0.3561, over 6208.00 frames. ], tot_loss[loss=4.449, NarTop10Accuracy=0.4226, over 5957.74 frames. ], batch size: 49, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (4/8) Epoch 3, batch 1600, train_loss[loss=4.288, NarTop10Accuracy=0.4534, over 7163.00 frames. ], tot_loss[loss=4.422, NarTop10Accuracy=0.428, over 5948.34 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,503 INFO [trainer.py:765] (4/8) Epoch 3, batch 1700, train_loss[loss=4.334, NarTop10Accuracy=0.4501, over 6197.00 frames. ], tot_loss[loss=4.396, NarTop10Accuracy=0.4325, over 5958.58 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,160 INFO [trainer.py:765] (4/8) Epoch 3, batch 1800, train_loss[loss=4.201, NarTop10Accuracy=0.4673, over 7297.00 frames. ], tot_loss[loss=4.374, NarTop10Accuracy=0.4365, over 6013.22 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,958 INFO [trainer.py:765] (4/8) Epoch 3, batch 1900, train_loss[loss=4.64, NarTop10Accuracy=0.3956, over 6101.00 frames. ], tot_loss[loss=4.364, NarTop10Accuracy=0.4393, over 6042.31 frames. ], batch size: 49, lr: 2.16e-02 +2024-08-06 07:19:27,621 INFO [trainer.py:765] (4/8) Epoch 3, batch 2000, train_loss[loss=4.489, NarTop10Accuracy=0.4081, over 6184.00 frames. ], tot_loss[loss=4.343, NarTop10Accuracy=0.443, over 6009.23 frames. ], batch size: 48, lr: 2.15e-02 +2024-08-06 07:19:53,070 INFO [trainer.py:765] (4/8) Epoch 3, batch 2100, train_loss[loss=4.244, NarTop10Accuracy=0.4616, over 4009.00 frames. ], tot_loss[loss=4.32, NarTop10Accuracy=0.4473, over 5988.14 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,553 INFO [trainer.py:765] (4/8) Epoch 3, batch 2200, train_loss[loss=4.547, NarTop10Accuracy=0.4029, over 6895.00 frames. ], tot_loss[loss=4.31, NarTop10Accuracy=0.4494, over 6033.78 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (4/8) Epoch 3, batch 2300, train_loss[loss=4.325, NarTop10Accuracy=0.447, over 5906.00 frames. ], tot_loss[loss=4.319, NarTop10Accuracy=0.4475, over 6049.40 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,677 INFO [trainer.py:765] (4/8) Epoch 3, batch 2400, train_loss[loss=3.689, NarTop10Accuracy=0.5603, over 5164.00 frames. ], tot_loss[loss=4.31, NarTop10Accuracy=0.4494, over 5861.45 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (4/8) Epoch 3, batch 2500, train_loss[loss=4.376, NarTop10Accuracy=0.4458, over 5004.00 frames. ], tot_loss[loss=4.269, NarTop10Accuracy=0.4577, over 5537.80 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,891 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 07:23:00,976 INFO [trainer.py:765] (4/8) Epoch 4, batch 100, train_loss[loss=4.159, NarTop10Accuracy=0.4841, over 7431.00 frames. ], tot_loss[loss=4.184, NarTop10Accuracy=0.4759, over 2375.71 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,304 INFO [trainer.py:765] (4/8) Epoch 4, batch 200, train_loss[loss=4.246, NarTop10Accuracy=0.4648, over 6926.00 frames. ], tot_loss[loss=4.192, NarTop10Accuracy=0.4748, over 3866.30 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,362 INFO [trainer.py:765] (4/8) Epoch 4, batch 300, train_loss[loss=4.012, NarTop10Accuracy=0.5174, over 7074.00 frames. ], tot_loss[loss=4.177, NarTop10Accuracy=0.4779, over 4668.02 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,597 INFO [trainer.py:765] (4/8) Epoch 4, batch 400, train_loss[loss=3.917, NarTop10Accuracy=0.5225, over 5066.00 frames. ], tot_loss[loss=4.178, NarTop10Accuracy=0.4775, over 5128.63 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (4/8) Epoch 4, batch 500, train_loss[loss=4.275, NarTop10Accuracy=0.4711, over 6264.00 frames. ], tot_loss[loss=4.163, NarTop10Accuracy=0.4803, over 5398.86 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (4/8) Epoch 4, batch 600, train_loss[loss=4.197, NarTop10Accuracy=0.4644, over 5748.00 frames. ], tot_loss[loss=4.148, NarTop10Accuracy=0.4832, over 5680.65 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (4/8) Epoch 4, batch 700, train_loss[loss=3.91, NarTop10Accuracy=0.5255, over 4992.00 frames. ], tot_loss[loss=4.145, NarTop10Accuracy=0.4836, over 5739.12 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,434 INFO [trainer.py:765] (4/8) Epoch 4, batch 800, train_loss[loss=4.021, NarTop10Accuracy=0.5062, over 5139.00 frames. ], tot_loss[loss=4.142, NarTop10Accuracy=0.4844, over 5782.66 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,042 INFO [trainer.py:765] (4/8) Epoch 4, batch 900, train_loss[loss=4.177, NarTop10Accuracy=0.4797, over 6283.00 frames. ], tot_loss[loss=4.113, NarTop10Accuracy=0.4901, over 5805.60 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (4/8) Epoch 4, batch 1000, train_loss[loss=3.9, NarTop10Accuracy=0.5274, over 6195.00 frames. ], tot_loss[loss=4.102, NarTop10Accuracy=0.4924, over 5907.69 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,070 INFO [trainer.py:765] (4/8) Epoch 4, batch 1100, train_loss[loss=3.954, NarTop10Accuracy=0.5189, over 6984.00 frames. ], tot_loss[loss=4.102, NarTop10Accuracy=0.4922, over 5959.08 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,598 INFO [trainer.py:765] (4/8) Epoch 4, batch 1200, train_loss[loss=4.251, NarTop10Accuracy=0.4656, over 7423.00 frames. ], tot_loss[loss=4.098, NarTop10Accuracy=0.4925, over 5947.16 frames. ], batch size: 30, lr: 1.87e-02 +2024-08-06 07:30:04,991 INFO [trainer.py:765] (4/8) Epoch 4, batch 1300, train_loss[loss=3.793, NarTop10Accuracy=0.5674, over 5090.00 frames. ], tot_loss[loss=4.076, NarTop10Accuracy=0.4971, over 6021.76 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,379 INFO [trainer.py:765] (4/8) Epoch 4, batch 1400, train_loss[loss=4.085, NarTop10Accuracy=0.4916, over 6174.00 frames. ], tot_loss[loss=4.076, NarTop10Accuracy=0.4966, over 6054.21 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,831 INFO [trainer.py:765] (4/8) Epoch 4, batch 1500, train_loss[loss=4.034, NarTop10Accuracy=0.5092, over 6436.00 frames. ], tot_loss[loss=4.073, NarTop10Accuracy=0.4977, over 5971.09 frames. 
], batch size: 49, lr: 1.85e-02 +2024-08-06 07:31:39,960 INFO [trainer.py:765] (4/8) Epoch 4, batch 1600, train_loss[loss=4.243, NarTop10Accuracy=0.4616, over 7203.00 frames. ], tot_loss[loss=4.081, NarTop10Accuracy=0.4965, over 5953.65 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,853 INFO [trainer.py:765] (4/8) Epoch 4, batch 1700, train_loss[loss=4.414, NarTop10Accuracy=0.4255, over 6315.00 frames. ], tot_loss[loss=4.053, NarTop10Accuracy=0.5021, over 5946.31 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,482 INFO [trainer.py:765] (4/8) Epoch 4, batch 1800, train_loss[loss=4.187, NarTop10Accuracy=0.4706, over 7134.00 frames. ], tot_loss[loss=4.053, NarTop10Accuracy=0.5021, over 6006.35 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,193 INFO [trainer.py:765] (4/8) Epoch 4, batch 1900, train_loss[loss=4.156, NarTop10Accuracy=0.4784, over 5916.00 frames. ], tot_loss[loss=4.074, NarTop10Accuracy=0.498, over 6046.40 frames. ], batch size: 49, lr: 1.82e-02 +2024-08-06 07:33:25,989 INFO [trainer.py:765] (4/8) Epoch 4, batch 2000, train_loss[loss=4.387, NarTop10Accuracy=0.4419, over 5490.00 frames. ], tot_loss[loss=4.052, NarTop10Accuracy=0.5024, over 6020.87 frames. ], batch size: 49, lr: 1.81e-02 +2024-08-06 07:33:51,511 INFO [trainer.py:765] (4/8) Epoch 4, batch 2100, train_loss[loss=3.742, NarTop10Accuracy=0.5517, over 4846.00 frames. ], tot_loss[loss=4.04, NarTop10Accuracy=0.5052, over 6017.08 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (4/8) Epoch 4, batch 2200, train_loss[loss=4.044, NarTop10Accuracy=0.5012, over 6965.00 frames. ], tot_loss[loss=4.042, NarTop10Accuracy=0.5048, over 6054.25 frames. ], batch size: 30, lr: 1.80e-02 +2024-08-06 07:34:31,431 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (4/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,444 INFO [trainer.py:765] (4/8) Epoch 4, batch 2300, train_loss[loss=3.778, NarTop10Accuracy=0.564, over 5739.00 frames. ], tot_loss[loss=4.038, NarTop10Accuracy=0.506, over 6060.08 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,167 INFO [trainer.py:765] (4/8) Epoch 4, batch 2400, train_loss[loss=3.867, NarTop10Accuracy=0.5383, over 5190.00 frames. ], tot_loss[loss=4.037, NarTop10Accuracy=0.5061, over 5867.65 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,622 INFO [trainer.py:765] (4/8) Epoch 4, batch 2500, train_loss[loss=4.046, NarTop10Accuracy=0.4996, over 5117.00 frames. ], tot_loss[loss=4.009, NarTop10Accuracy=0.5113, over 5518.04 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,745 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (4/8) Epoch 5, batch 100, train_loss[loss=3.941, NarTop10Accuracy=0.5196, over 7092.00 frames. ], tot_loss[loss=3.967, NarTop10Accuracy=0.5212, over 2360.98 frames. ], batch size: 30, lr: 1.66e-02 +2024-08-06 07:37:39,814 INFO [trainer.py:765] (4/8) Epoch 5, batch 200, train_loss[loss=3.98, NarTop10Accuracy=0.514, over 6944.00 frames. ], tot_loss[loss=3.938, NarTop10Accuracy=0.5272, over 3860.19 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (4/8) Epoch 5, batch 300, train_loss[loss=4.197, NarTop10Accuracy=0.479, over 7038.00 frames. ], tot_loss[loss=3.926, NarTop10Accuracy=0.5302, over 4669.67 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (4/8) Epoch 5, batch 400, train_loss[loss=3.861, NarTop10Accuracy=0.5352, over 5139.00 frames. ], tot_loss[loss=3.918, NarTop10Accuracy=0.5306, over 5127.58 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (4/8) Epoch 5, batch 500, train_loss[loss=3.953, NarTop10Accuracy=0.5287, over 6135.00 frames. ], tot_loss[loss=3.922, NarTop10Accuracy=0.5298, over 5397.04 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (4/8) Epoch 5, batch 600, train_loss[loss=3.857, NarTop10Accuracy=0.5368, over 5789.00 frames. ], tot_loss[loss=3.903, NarTop10Accuracy=0.5336, over 5668.44 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (4/8) Epoch 5, batch 700, train_loss[loss=3.524, NarTop10Accuracy=0.6096, over 4998.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5332, over 5743.87 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,366 INFO [trainer.py:765] (4/8) Epoch 5, batch 800, train_loss[loss=4.282, NarTop10Accuracy=0.4615, over 5055.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5334, over 5774.75 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (4/8) Epoch 5, batch 900, train_loss[loss=4.166, NarTop10Accuracy=0.4806, over 6646.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5349, over 5817.50 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 07:42:13,845 INFO [trainer.py:765] (4/8) Epoch 5, batch 1000, train_loss[loss=4.088, NarTop10Accuracy=0.4833, over 6363.00 frames. ], tot_loss[loss=3.889, NarTop10Accuracy=0.5366, over 5918.22 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,467 INFO [trainer.py:765] (4/8) Epoch 5, batch 1100, train_loss[loss=3.974, NarTop10Accuracy=0.5201, over 6834.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5331, over 5955.97 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (4/8) Epoch 5, batch 1200, train_loss[loss=4.027, NarTop10Accuracy=0.512, over 7083.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5328, over 5938.48 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,556 INFO [trainer.py:765] (4/8) Epoch 5, batch 1300, train_loss[loss=4.1, NarTop10Accuracy=0.5038, over 5046.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5332, over 6011.41 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (4/8) Epoch 5, batch 1400, train_loss[loss=3.874, NarTop10Accuracy=0.5476, over 6284.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5346, over 6037.53 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (4/8) Epoch 5, batch 1500, train_loss[loss=3.867, NarTop10Accuracy=0.5459, over 6213.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.5332, over 5983.00 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (4/8) Epoch 5, batch 1600, train_loss[loss=4.15, NarTop10Accuracy=0.492, over 7108.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5329, over 5960.19 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,058 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (4/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27216MB +2024-08-06 07:46:02,123 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (4/8) Epoch 5, batch 1700, train_loss[loss=3.809, NarTop10Accuracy=0.5615, over 6698.00 frames. ], tot_loss[loss=3.901, NarTop10Accuracy=0.5339, over 5935.47 frames. ], batch size: 14, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (4/8) Epoch 5, batch 1800, train_loss[loss=4.108, NarTop10Accuracy=0.4974, over 7211.00 frames. ], tot_loss[loss=3.888, NarTop10Accuracy=0.537, over 6007.05 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (4/8) Epoch 5, batch 1900, train_loss[loss=3.919, NarTop10Accuracy=0.5329, over 6285.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5344, over 6032.06 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (4/8) Epoch 5, batch 2000, train_loss[loss=3.736, NarTop10Accuracy=0.569, over 5969.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.534, over 6010.51 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:52,618 INFO [trainer.py:765] (4/8) Epoch 5, batch 2100, train_loss[loss=3.86, NarTop10Accuracy=0.5287, over 3849.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5349, over 5985.77 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (4/8) Epoch 5, batch 2200, train_loss[loss=3.889, NarTop10Accuracy=0.5426, over 7293.00 frames. ], tot_loss[loss=3.88, NarTop10Accuracy=0.5385, over 6041.91 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (4/8) Epoch 5, batch 2300, train_loss[loss=4.035, NarTop10Accuracy=0.5123, over 5872.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5352, over 6053.91 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (4/8) Epoch 5, batch 2400, train_loss[loss=3.86, NarTop10Accuracy=0.5394, over 5201.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5361, over 5880.80 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (4/8) Epoch 5, batch 2500, train_loss[loss=3.543, NarTop10Accuracy=0.6045, over 5102.00 frames. ], tot_loss[loss=3.856, NarTop10Accuracy=0.5433, over 5547.17 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:52,939 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (4/8) Epoch 6, batch 100, train_loss[loss=3.677, NarTop10Accuracy=0.5804, over 7112.00 frames. ], tot_loss[loss=3.78, NarTop10Accuracy=0.5596, over 2360.74 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,789 INFO [trainer.py:765] (4/8) Epoch 6, batch 200, train_loss[loss=3.765, NarTop10Accuracy=0.5614, over 6913.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.5566, over 3869.01 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (4/8) Epoch 6, batch 300, train_loss[loss=3.752, NarTop10Accuracy=0.5667, over 7047.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.5594, over 4668.27 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (4/8) Epoch 6, batch 400, train_loss[loss=3.842, NarTop10Accuracy=0.5501, over 5194.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.5582, over 5118.44 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,103 INFO [trainer.py:765] (4/8) Epoch 6, batch 500, train_loss[loss=3.925, NarTop10Accuracy=0.5313, over 5945.00 frames. ], tot_loss[loss=3.77, NarTop10Accuracy=0.5617, over 5399.97 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (4/8) Epoch 6, batch 600, train_loss[loss=3.873, NarTop10Accuracy=0.549, over 5779.00 frames. ], tot_loss[loss=3.773, NarTop10Accuracy=0.561, over 5653.54 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,439 INFO [trainer.py:765] (4/8) Epoch 6, batch 700, train_loss[loss=4.142, NarTop10Accuracy=0.4862, over 5196.00 frames. ], tot_loss[loss=3.779, NarTop10Accuracy=0.5599, over 5738.27 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (4/8) Epoch 6, batch 800, train_loss[loss=3.872, NarTop10Accuracy=0.5445, over 5238.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5568, over 5799.37 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (4/8) Epoch 6, batch 900, train_loss[loss=3.561, NarTop10Accuracy=0.6108, over 6383.00 frames. ], tot_loss[loss=3.79, NarTop10Accuracy=0.5573, over 5818.21 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (4/8) Epoch 6, batch 1000, train_loss[loss=3.583, NarTop10Accuracy=0.6, over 6734.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5541, over 5928.15 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (4/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27799MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (4/8) Epoch 6, batch 1100, train_loss[loss=3.786, NarTop10Accuracy=0.5581, over 6836.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5546, over 5959.67 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (4/8) Epoch 6, batch 1200, train_loss[loss=4.026, NarTop10Accuracy=0.506, over 7481.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5548, over 5965.73 frames. ], batch size: 33, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (4/8) Epoch 6, batch 1300, train_loss[loss=3.65, NarTop10Accuracy=0.5829, over 5009.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5565, over 6028.06 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (4/8) Epoch 6, batch 1400, train_loss[loss=4.23, NarTop10Accuracy=0.4649, over 6074.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5555, over 6050.61 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,999 INFO [trainer.py:765] (4/8) Epoch 6, batch 1500, train_loss[loss=4.077, NarTop10Accuracy=0.5008, over 6034.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5561, over 5990.78 frames. 
], batch size: 51, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (4/8) Epoch 6, batch 1600, train_loss[loss=3.615, NarTop10Accuracy=0.6056, over 7290.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5575, over 5957.12 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (4/8) Epoch 6, batch 1700, train_loss[loss=3.79, NarTop10Accuracy=0.5633, over 6698.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5579, over 5945.85 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (4/8) Epoch 6, batch 1800, train_loss[loss=3.784, NarTop10Accuracy=0.5632, over 7189.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5583, over 6023.75 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,795 INFO [trainer.py:765] (4/8) Epoch 6, batch 1900, train_loss[loss=4.097, NarTop10Accuracy=0.5034, over 6462.00 frames. ], tot_loss[loss=3.819, NarTop10Accuracy=0.5515, over 6067.62 frames. ], batch size: 51, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (4/8) Epoch 6, batch 2000, train_loss[loss=4.033, NarTop10Accuracy=0.5188, over 5842.00 frames. ], tot_loss[loss=3.801, NarTop10Accuracy=0.5552, over 6043.42 frames. ], batch size: 48, lr: 1.34e-02 +2024-08-06 08:01:43,134 INFO [trainer.py:765] (4/8) Epoch 6, batch 2100, train_loss[loss=3.672, NarTop10Accuracy=0.5768, over 4823.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5551, over 6020.20 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (4/8) Epoch 6, batch 2200, train_loss[loss=3.836, NarTop10Accuracy=0.5516, over 7126.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.554, over 6043.96 frames. ], batch size: 30, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (4/8) Epoch 6, batch 2300, train_loss[loss=3.649, NarTop10Accuracy=0.5874, over 5810.00 frames. ], tot_loss[loss=3.807, NarTop10Accuracy=0.5538, over 6073.83 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (4/8) Epoch 6, batch 2400, train_loss[loss=3.894, NarTop10Accuracy=0.549, over 5205.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5563, over 5864.99 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (4/8) Epoch 6, batch 2500, train_loss[loss=4.061, NarTop10Accuracy=0.4977, over 4404.00 frames. ], tot_loss[loss=3.78, NarTop10Accuracy=0.5593, over 5545.00 frames. ], batch size: 5, lr: 1.32e-02 +2024-08-06 08:03:43,382 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:04:42,817 INFO [trainer.py:765] (4/8) Epoch 7, batch 100, train_loss[loss=3.921, NarTop10Accuracy=0.5353, over 6993.00 frames. ], tot_loss[loss=3.684, NarTop10Accuracy=0.5797, over 2384.13 frames. ], batch size: 30, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (4/8) Epoch 7, batch 200, train_loss[loss=3.89, NarTop10Accuracy=0.547, over 6834.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5745, over 3880.84 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (4/8) Epoch 7, batch 300, train_loss[loss=3.65, NarTop10Accuracy=0.5821, over 6974.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5736, over 4668.66 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (4/8) Epoch 7, batch 400, train_loss[loss=4.144, NarTop10Accuracy=0.4862, over 5112.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.572, over 5123.12 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,316 INFO [trainer.py:765] (4/8) Epoch 7, batch 500, train_loss[loss=3.422, NarTop10Accuracy=0.625, over 6120.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5741, over 5397.40 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 27799MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (4/8) Epoch 7, batch 600, train_loss[loss=3.75, NarTop10Accuracy=0.5696, over 5680.00 frames. ], tot_loss[loss=3.708, NarTop10Accuracy=0.574, over 5674.31 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (4/8) Epoch 7, batch 700, train_loss[loss=3.648, NarTop10Accuracy=0.5947, over 5029.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5728, over 5750.98 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,558 INFO [trainer.py:765] (4/8) Epoch 7, batch 800, train_loss[loss=3.522, NarTop10Accuracy=0.6182, over 4928.00 frames. ], tot_loss[loss=3.699, NarTop10Accuracy=0.5765, over 5793.95 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (4/8) Epoch 7, batch 900, train_loss[loss=3.67, NarTop10Accuracy=0.5781, over 6343.00 frames. ], tot_loss[loss=3.702, NarTop10Accuracy=0.5759, over 5817.20 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,192 INFO [trainer.py:765] (4/8) Epoch 7, batch 1000, train_loss[loss=4.063, NarTop10Accuracy=0.502, over 6285.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.574, over 5929.25 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (4/8) Epoch 7, batch 1100, train_loss[loss=3.732, NarTop10Accuracy=0.5701, over 6859.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5726, over 5957.71 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (4/8) Epoch 7, batch 1200, train_loss[loss=4.1, NarTop10Accuracy=0.4865, over 7259.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5731, over 5950.84 frames. ], batch size: 30, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (4/8) Epoch 7, batch 1300, train_loss[loss=3.387, NarTop10Accuracy=0.6379, over 5099.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.572, over 6011.96 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (4/8) Epoch 7, batch 1400, train_loss[loss=3.839, NarTop10Accuracy=0.5519, over 6193.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5712, over 6034.48 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (4/8) Epoch 7, batch 1500, train_loss[loss=3.724, NarTop10Accuracy=0.5802, over 6143.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.5725, over 5977.01 frames. ], batch size: 48, lr: 1.19e-02 +2024-08-06 08:13:13,238 INFO [trainer.py:765] (4/8) Epoch 7, batch 1600, train_loss[loss=3.522, NarTop10Accuracy=0.6107, over 7070.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5725, over 5961.24 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (4/8) Epoch 7, batch 1700, train_loss[loss=3.59, NarTop10Accuracy=0.5941, over 6757.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5697, over 5953.41 frames. ], batch size: 14, lr: 1.18e-02 +2024-08-06 08:14:06,584 INFO [trainer.py:765] (4/8) Epoch 7, batch 1800, train_loss[loss=3.619, NarTop10Accuracy=0.5937, over 7021.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5685, over 6013.77 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (4/8) Epoch 7, batch 1900, train_loss[loss=4.214, NarTop10Accuracy=0.4789, over 5672.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5685, over 6044.40 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:14:58,995 INFO [trainer.py:765] (4/8) Epoch 7, batch 2000, train_loss[loss=3.722, NarTop10Accuracy=0.5734, over 5955.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5706, over 6032.25 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (4/8) Epoch 7, batch 2100, train_loss[loss=3.882, NarTop10Accuracy=0.5337, over 4820.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5714, over 6003.69 frames. ], batch size: 5, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (4/8) Epoch 7, batch 2200, train_loss[loss=3.896, NarTop10Accuracy=0.5352, over 7249.00 frames. ], tot_loss[loss=3.725, NarTop10Accuracy=0.5704, over 6048.60 frames. ], batch size: 30, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (4/8) Epoch 7, batch 2300, train_loss[loss=4.003, NarTop10Accuracy=0.5096, over 5791.00 frames. ], tot_loss[loss=3.739, NarTop10Accuracy=0.5676, over 6072.33 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (4/8) Epoch 7, batch 2400, train_loss[loss=3.476, NarTop10Accuracy=0.6146, over 5073.00 frames. ], tot_loss[loss=3.728, NarTop10Accuracy=0.5696, over 5891.49 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (4/8) Epoch 7, batch 2500, train_loss[loss=3.457, NarTop10Accuracy=0.6086, over 4375.00 frames. ], tot_loss[loss=3.702, NarTop10Accuracy=0.575, over 5560.81 frames. ], batch size: 5, lr: 1.16e-02 +2024-08-06 08:17:06,844 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (4/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29101MB +2024-08-06 08:17:17,901 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,455 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:18:36,194 INFO [trainer.py:765] (4/8) Epoch 8, batch 100, train_loss[loss=3.709, NarTop10Accuracy=0.5638, over 7418.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5818, over 2370.87 frames. ], batch size: 30, lr: 1.09e-02 +2024-08-06 08:19:15,020 INFO [trainer.py:765] (4/8) Epoch 8, batch 200, train_loss[loss=3.636, NarTop10Accuracy=0.585, over 6870.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5827, over 3878.97 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (4/8) Epoch 8, batch 300, train_loss[loss=3.757, NarTop10Accuracy=0.5647, over 7136.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5831, over 4672.67 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,269 INFO [trainer.py:765] (4/8) Epoch 8, batch 400, train_loss[loss=3.579, NarTop10Accuracy=0.6112, over 5199.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5831, over 5108.07 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,422 INFO [trainer.py:765] (4/8) Epoch 8, batch 500, train_loss[loss=3.433, NarTop10Accuracy=0.6359, over 6072.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5845, over 5393.93 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (4/8) Epoch 8, batch 600, train_loss[loss=3.78, NarTop10Accuracy=0.5619, over 5844.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5827, over 5679.87 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (4/8) Epoch 8, batch 700, train_loss[loss=3.76, NarTop10Accuracy=0.5693, over 5188.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5827, over 5735.87 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,342 INFO [trainer.py:765] (4/8) Epoch 8, batch 800, train_loss[loss=3.33, NarTop10Accuracy=0.6592, over 5056.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5826, over 5801.05 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (4/8) Epoch 8, batch 900, train_loss[loss=3.463, NarTop10Accuracy=0.6214, over 6335.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5844, over 5827.98 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:42,944 INFO [trainer.py:765] (4/8) Epoch 8, batch 1000, train_loss[loss=3.575, NarTop10Accuracy=0.5976, over 6198.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.584, over 5931.11 frames. ], batch size: 13, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (4/8) Epoch 8, batch 1100, train_loss[loss=3.542, NarTop10Accuracy=0.6062, over 6884.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5814, over 5964.46 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,339 INFO [trainer.py:765] (4/8) Epoch 8, batch 1200, train_loss[loss=3.659, NarTop10Accuracy=0.5925, over 7422.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5794, over 5967.27 frames. ], batch size: 32, lr: 1.06e-02 +2024-08-06 08:25:26,604 INFO [trainer.py:765] (4/8) Epoch 8, batch 1300, train_loss[loss=3.843, NarTop10Accuracy=0.5484, over 4983.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5835, over 6031.22 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,605 INFO [trainer.py:765] (4/8) Epoch 8, batch 1400, train_loss[loss=3.722, NarTop10Accuracy=0.573, over 6243.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5814, over 6033.27 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (4/8) Epoch 8, batch 1500, train_loss[loss=3.692, NarTop10Accuracy=0.5806, over 6226.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.5812, over 5972.85 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (4/8) Epoch 8, batch 1600, train_loss[loss=3.805, NarTop10Accuracy=0.5472, over 7132.00 frames. ], tot_loss[loss=3.675, NarTop10Accuracy=0.581, over 5965.21 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (4/8) Epoch 8, batch 1700, train_loss[loss=3.684, NarTop10Accuracy=0.5843, over 6188.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5794, over 5948.16 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,462 INFO [trainer.py:765] (4/8) Epoch 8, batch 1800, train_loss[loss=3.874, NarTop10Accuracy=0.5445, over 7171.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.5813, over 6028.72 frames. ], batch size: 23, lr: 1.04e-02 +2024-08-06 08:28:17,180 INFO [trainer.py:765] (4/8) Epoch 8, batch 1900, train_loss[loss=4.063, NarTop10Accuracy=0.4985, over 5801.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5816, over 6053.77 frames. ], batch size: 48, lr: 1.04e-02 +2024-08-06 08:28:25,165 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (4/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29101MB +2024-08-06 08:28:35,796 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,984 INFO [trainer.py:765] (4/8) Epoch 8, batch 2000, train_loss[loss=3.845, NarTop10Accuracy=0.5593, over 6283.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5818, over 6033.50 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:29:18,485 INFO [trainer.py:765] (4/8) Epoch 8, batch 2100, train_loss[loss=3.206, NarTop10Accuracy=0.6725, over 4896.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5825, over 6019.98 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 08:29:43,790 INFO [trainer.py:765] (4/8) Epoch 8, batch 2200, train_loss[loss=4.01, NarTop10Accuracy=0.5166, over 7322.00 frames. ], tot_loss[loss=3.681, NarTop10Accuracy=0.5799, over 6051.96 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (4/8) Epoch 8, batch 2300, train_loss[loss=3.609, NarTop10Accuracy=0.6004, over 5734.00 frames. ], tot_loss[loss=3.685, NarTop10Accuracy=0.5791, over 6079.14 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,792 INFO [trainer.py:765] (4/8) Epoch 8, batch 2400, train_loss[loss=3.715, NarTop10Accuracy=0.5866, over 5185.00 frames. ], tot_loss[loss=3.689, NarTop10Accuracy=0.5783, over 5885.99 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,140 INFO [trainer.py:765] (4/8) Epoch 8, batch 2500, train_loss[loss=3.59, NarTop10Accuracy=0.6094, over 5121.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5829, over 5522.74 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,599 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (4/8) Epoch 9, batch 100, train_loss[loss=3.833, NarTop10Accuracy=0.5437, over 7165.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5979, over 2366.77 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (4/8) Epoch 9, batch 200, train_loss[loss=3.585, NarTop10Accuracy=0.5947, over 6889.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5983, over 3864.97 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (4/8) Epoch 9, batch 300, train_loss[loss=3.661, NarTop10Accuracy=0.573, over 6924.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5975, over 4672.82 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,964 INFO [trainer.py:765] (4/8) Epoch 9, batch 400, train_loss[loss=3.576, NarTop10Accuracy=0.6016, over 5120.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6, over 5125.75 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (4/8) Epoch 9, batch 500, train_loss[loss=3.565, NarTop10Accuracy=0.5996, over 6198.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6026, over 5390.89 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (4/8) Epoch 9, batch 600, train_loss[loss=3.427, NarTop10Accuracy=0.6359, over 5944.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.601, over 5657.28 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (4/8) Epoch 9, batch 700, train_loss[loss=3.726, NarTop10Accuracy=0.5799, over 4956.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5984, over 5738.84 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (4/8) Epoch 9, batch 800, train_loss[loss=3.405, NarTop10Accuracy=0.6446, over 4331.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5945, over 5803.61 frames. ], batch size: 5, lr: 9.56e-03 +2024-08-06 08:36:46,455 INFO [trainer.py:765] (4/8) Epoch 9, batch 900, train_loss[loss=3.455, NarTop10Accuracy=0.6197, over 6643.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5939, over 5814.23 frames. ], batch size: 14, lr: 9.54e-03 +2024-08-06 08:37:26,565 INFO [trainer.py:765] (4/8) Epoch 9, batch 1000, train_loss[loss=3.471, NarTop10Accuracy=0.6191, over 6247.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5937, over 5925.56 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,421 INFO [trainer.py:765] (4/8) Epoch 9, batch 1100, train_loss[loss=3.693, NarTop10Accuracy=0.5735, over 6867.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5904, over 5954.63 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (4/8) Epoch 9, batch 1200, train_loss[loss=3.797, NarTop10Accuracy=0.5548, over 7102.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5892, over 5944.98 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (4/8) Epoch 9, batch 1300, train_loss[loss=3.306, NarTop10Accuracy=0.6367, over 5055.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5873, over 6002.93 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,117 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (4/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29101MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (4/8) Epoch 9, batch 1400, train_loss[loss=3.555, NarTop10Accuracy=0.6148, over 6185.00 frames. ], tot_loss[loss=3.626, NarTop10Accuracy=0.5905, over 6026.24 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (4/8) Epoch 9, batch 1500, train_loss[loss=3.919, NarTop10Accuracy=0.5401, over 5939.00 frames. ], tot_loss[loss=3.64, NarTop10Accuracy=0.5885, over 5968.42 frames. ], batch size: 49, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (4/8) Epoch 9, batch 1600, train_loss[loss=3.723, NarTop10Accuracy=0.5724, over 7221.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.5886, over 5947.35 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (4/8) Epoch 9, batch 1700, train_loss[loss=3.527, NarTop10Accuracy=0.6108, over 6763.00 frames. ], tot_loss[loss=3.647, NarTop10Accuracy=0.5867, over 5936.58 frames. ], batch size: 14, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (4/8) Epoch 9, batch 1800, train_loss[loss=3.782, NarTop10Accuracy=0.5621, over 7184.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5893, over 5995.46 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,496 INFO [trainer.py:765] (4/8) Epoch 9, batch 1900, train_loss[loss=3.805, NarTop10Accuracy=0.5538, over 5738.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5895, over 6041.83 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,204 INFO [trainer.py:765] (4/8) Epoch 9, batch 2000, train_loss[loss=4.003, NarTop10Accuracy=0.5208, over 5811.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.5878, over 6003.99 frames. ], batch size: 51, lr: 9.31e-03 +2024-08-06 08:43:01,667 INFO [trainer.py:765] (4/8) Epoch 9, batch 2100, train_loss[loss=3.408, NarTop10Accuracy=0.6329, over 3850.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5888, over 6006.71 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (4/8) Epoch 9, batch 2200, train_loss[loss=3.569, NarTop10Accuracy=0.6111, over 7448.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5879, over 6038.40 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (4/8) Epoch 9, batch 2300, train_loss[loss=3.852, NarTop10Accuracy=0.5421, over 5780.00 frames. ], tot_loss[loss=3.657, NarTop10Accuracy=0.5853, over 6068.13 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (4/8) Epoch 9, batch 2400, train_loss[loss=3.833, NarTop10Accuracy=0.5505, over 5162.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5853, over 5874.86 frames. ], batch size: 7, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (4/8) Epoch 9, batch 2500, train_loss[loss=3.826, NarTop10Accuracy=0.552, over 5017.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.588, over 5536.46 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:05,299 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 08:46:09,065 INFO [trainer.py:765] (4/8) Epoch 10, batch 100, train_loss[loss=3.468, NarTop10Accuracy=0.6245, over 7670.00 frames. ], tot_loss[loss=3.58, NarTop10Accuracy=0.6004, over 2376.52 frames. ], batch size: 31, lr: 8.75e-03 +2024-08-06 08:46:44,074 INFO [trainer.py:765] (4/8) Epoch 10, batch 200, train_loss[loss=3.585, NarTop10Accuracy=0.6002, over 6880.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6054, over 3873.00 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (4/8) Epoch 10, batch 300, train_loss[loss=3.661, NarTop10Accuracy=0.5864, over 7273.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6053, over 4670.45 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,119 INFO [trainer.py:765] (4/8) Epoch 10, batch 400, train_loss[loss=3.675, NarTop10Accuracy=0.5871, over 5110.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6023, over 5111.23 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,371 INFO [trainer.py:765] (4/8) Epoch 10, batch 500, train_loss[loss=3.487, NarTop10Accuracy=0.6218, over 6079.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6023, over 5386.90 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,461 INFO [trainer.py:765] (4/8) Epoch 10, batch 600, train_loss[loss=3.191, NarTop10Accuracy=0.6726, over 5803.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6014, over 5654.20 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,708 INFO [trainer.py:765] (4/8) Epoch 10, batch 700, train_loss[loss=3.525, NarTop10Accuracy=0.6169, over 5002.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5989, over 5736.89 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,165 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (4/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29735MB +2024-08-06 08:50:01,724 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (4/8) Epoch 10, batch 800, train_loss[loss=3.386, NarTop10Accuracy=0.6517, over 5163.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5989, over 5793.13 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,891 INFO [trainer.py:765] (4/8) Epoch 10, batch 900, train_loss[loss=3.411, NarTop10Accuracy=0.6422, over 6696.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6019, over 5828.95 frames. ], batch size: 14, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (4/8) Epoch 10, batch 1000, train_loss[loss=4.069, NarTop10Accuracy=0.5068, over 6248.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5978, over 5931.04 frames. ], batch size: 13, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (4/8) Epoch 10, batch 1100, train_loss[loss=3.317, NarTop10Accuracy=0.6552, over 6979.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.596, over 5959.78 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (4/8) Epoch 10, batch 1200, train_loss[loss=3.639, NarTop10Accuracy=0.5879, over 7340.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5969, over 5941.12 frames. ], batch size: 30, lr: 8.56e-03 +2024-08-06 08:53:06,608 INFO [trainer.py:765] (4/8) Epoch 10, batch 1300, train_loss[loss=3.746, NarTop10Accuracy=0.5781, over 4315.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5971, over 6023.90 frames. ], batch size: 5, lr: 8.54e-03 +2024-08-06 08:53:46,880 INFO [trainer.py:765] (4/8) Epoch 10, batch 1400, train_loss[loss=3.566, NarTop10Accuracy=0.616, over 6027.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5938, over 6034.16 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (4/8) Epoch 10, batch 1500, train_loss[loss=3.653, NarTop10Accuracy=0.5888, over 6288.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5983, over 5977.00 frames. ], batch size: 50, lr: 8.51e-03 +2024-08-06 08:54:45,526 INFO [trainer.py:765] (4/8) Epoch 10, batch 1600, train_loss[loss=3.54, NarTop10Accuracy=0.6155, over 7212.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5958, over 5961.15 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,300 INFO [trainer.py:765] (4/8) Epoch 10, batch 1700, train_loss[loss=3.411, NarTop10Accuracy=0.64, over 6141.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5967, over 5932.55 frames. 
], batch size: 13, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (4/8) Epoch 10, batch 1800, train_loss[loss=3.483, NarTop10Accuracy=0.6292, over 7053.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.598, over 5992.62 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (4/8) Epoch 10, batch 1900, train_loss[loss=4.11, NarTop10Accuracy=0.4986, over 6288.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5956, over 6036.82 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (4/8) Epoch 10, batch 2000, train_loss[loss=3.591, NarTop10Accuracy=0.5975, over 6352.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5964, over 6008.32 frames. ], batch size: 49, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (4/8) Epoch 10, batch 2100, train_loss[loss=3.579, NarTop10Accuracy=0.59, over 3973.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5958, over 5997.91 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (4/8) Epoch 10, batch 2200, train_loss[loss=3.766, NarTop10Accuracy=0.5571, over 7369.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5958, over 6046.63 frames. ], batch size: 31, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (4/8) Epoch 10, batch 2300, train_loss[loss=3.382, NarTop10Accuracy=0.6542, over 5807.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5943, over 6069.97 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,344 INFO [trainer.py:765] (4/8) Epoch 10, batch 2400, train_loss[loss=3.368, NarTop10Accuracy=0.6569, over 5153.00 frames. ], tot_loss[loss=3.618, NarTop10Accuracy=0.5929, over 5857.28 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,809 INFO [trainer.py:765] (4/8) Epoch 10, batch 2500, train_loss[loss=3.609, NarTop10Accuracy=0.5912, over 5005.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5954, over 5529.78 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:59:00,334 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:00:03,680 INFO [trainer.py:765] (4/8) Epoch 11, batch 100, train_loss[loss=3.449, NarTop10Accuracy=0.6233, over 6883.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6148, over 2362.69 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,217 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29735MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (4/8) Epoch 11, batch 200, train_loss[loss=3.771, NarTop10Accuracy=0.5525, over 6891.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6148, over 3874.48 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,854 INFO [trainer.py:765] (4/8) Epoch 11, batch 300, train_loss[loss=3.385, NarTop10Accuracy=0.6383, over 7047.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.6128, over 4687.73 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,534 INFO [trainer.py:765] (4/8) Epoch 11, batch 400, train_loss[loss=3.08, NarTop10Accuracy=0.692, over 5107.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6134, over 5133.38 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,240 INFO [trainer.py:765] (4/8) Epoch 11, batch 500, train_loss[loss=3.474, NarTop10Accuracy=0.6238, over 6239.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6126, over 5400.33 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,742 INFO [trainer.py:765] (4/8) Epoch 11, batch 600, train_loss[loss=3.668, NarTop10Accuracy=0.5842, over 5644.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6097, over 5659.50 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,236 INFO [trainer.py:765] (4/8) Epoch 11, batch 700, train_loss[loss=3.345, NarTop10Accuracy=0.6499, over 5072.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6088, over 5750.87 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (4/8) Epoch 11, batch 800, train_loss[loss=3.633, NarTop10Accuracy=0.5994, over 4857.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6052, over 5802.46 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,084 INFO [trainer.py:765] (4/8) Epoch 11, batch 900, train_loss[loss=3.461, NarTop10Accuracy=0.6137, over 6663.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6053, over 5829.71 frames. ], batch size: 14, lr: 7.84e-03 +2024-08-06 09:05:27,013 INFO [trainer.py:765] (4/8) Epoch 11, batch 1000, train_loss[loss=3.499, NarTop10Accuracy=0.6078, over 6220.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6041, over 5929.03 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,350 INFO [trainer.py:765] (4/8) Epoch 11, batch 1100, train_loss[loss=3.484, NarTop10Accuracy=0.6232, over 6822.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6029, over 5969.52 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,946 INFO [trainer.py:765] (4/8) Epoch 11, batch 1200, train_loss[loss=3.645, NarTop10Accuracy=0.5806, over 7330.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6025, over 5956.02 frames. ], batch size: 31, lr: 7.80e-03 +2024-08-06 09:07:15,494 INFO [trainer.py:765] (4/8) Epoch 11, batch 1300, train_loss[loss=3.34, NarTop10Accuracy=0.6443, over 4915.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6003, over 6022.19 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,629 INFO [trainer.py:765] (4/8) Epoch 11, batch 1400, train_loss[loss=3.473, NarTop10Accuracy=0.6139, over 6137.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.5991, over 6041.99 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,987 INFO [trainer.py:765] (4/8) Epoch 11, batch 1500, train_loss[loss=3.637, NarTop10Accuracy=0.5911, over 6153.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5989, over 5972.78 frames. ], batch size: 51, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (4/8) Epoch 11, batch 1600, train_loss[loss=3.576, NarTop10Accuracy=0.6149, over 7263.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6, over 5942.61 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (4/8) Epoch 11, batch 1700, train_loss[loss=3.523, NarTop10Accuracy=0.6035, over 6258.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.5998, over 5929.19 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,733 INFO [trainer.py:765] (4/8) Epoch 11, batch 1800, train_loss[loss=3.787, NarTop10Accuracy=0.563, over 7188.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.5996, over 5996.29 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,342 INFO [trainer.py:765] (4/8) Epoch 11, batch 1900, train_loss[loss=3.72, NarTop10Accuracy=0.5759, over 5436.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5971, over 6018.79 frames. ], batch size: 48, lr: 7.70e-03 +2024-08-06 09:10:33,040 INFO [trainer.py:765] (4/8) Epoch 11, batch 2000, train_loss[loss=3.602, NarTop10Accuracy=0.5965, over 6473.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.598, over 6000.83 frames. ], batch size: 49, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (4/8) Epoch 11, batch 2100, train_loss[loss=3.366, NarTop10Accuracy=0.6441, over 4825.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.602, over 5980.11 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (4/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29735MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (4/8) Epoch 11, batch 2200, train_loss[loss=3.577, NarTop10Accuracy=0.591, over 7263.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6006, over 6025.90 frames. ], batch size: 31, lr: 7.66e-03 +2024-08-06 09:11:59,940 INFO [trainer.py:765] (4/8) Epoch 11, batch 2300, train_loss[loss=3.45, NarTop10Accuracy=0.6221, over 5709.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5999, over 6075.67 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,696 INFO [trainer.py:765] (4/8) Epoch 11, batch 2400, train_loss[loss=3.656, NarTop10Accuracy=0.5912, over 5076.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5981, over 5892.69 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (4/8) Epoch 11, batch 2500, train_loss[loss=3.697, NarTop10Accuracy=0.584, over 4908.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.602, over 5552.35 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,006 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:14:12,278 INFO [trainer.py:765] (4/8) Epoch 12, batch 100, train_loss[loss=3.536, NarTop10Accuracy=0.6216, over 7383.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6122, over 2381.64 frames. ], batch size: 30, lr: 7.29e-03 +2024-08-06 09:14:48,095 INFO [trainer.py:765] (4/8) Epoch 12, batch 200, train_loss[loss=3.266, NarTop10Accuracy=0.6692, over 6881.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6172, over 3879.90 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (4/8) Epoch 12, batch 300, train_loss[loss=3.448, NarTop10Accuracy=0.6375, over 7204.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6172, over 4686.75 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (4/8) Epoch 12, batch 400, train_loss[loss=3.328, NarTop10Accuracy=0.6459, over 5672.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6142, over 5129.76 frames. ], batch size: 8, lr: 7.25e-03 +2024-08-06 09:16:26,432 INFO [trainer.py:765] (4/8) Epoch 12, batch 500, train_loss[loss=3.471, NarTop10Accuracy=0.6239, over 6197.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6143, over 5417.07 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,238 INFO [trainer.py:765] (4/8) Epoch 12, batch 600, train_loss[loss=3.281, NarTop10Accuracy=0.6588, over 5788.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.61, over 5693.05 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,317 INFO [trainer.py:765] (4/8) Epoch 12, batch 700, train_loss[loss=3.612, NarTop10Accuracy=0.6025, over 4945.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6112, over 5743.32 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,752 INFO [trainer.py:765] (4/8) Epoch 12, batch 800, train_loss[loss=3.534, NarTop10Accuracy=0.6038, over 5032.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6128, over 5792.28 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (4/8) Epoch 12, batch 900, train_loss[loss=3.553, NarTop10Accuracy=0.611, over 6268.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6111, over 5812.81 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (4/8) Epoch 12, batch 1000, train_loss[loss=3.449, NarTop10Accuracy=0.6215, over 6200.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6115, over 5898.43 frames. ], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,426 INFO [trainer.py:765] (4/8) Epoch 12, batch 1100, train_loss[loss=3.749, NarTop10Accuracy=0.5792, over 6909.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6099, over 5939.51 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,442 INFO [trainer.py:765] (4/8) Epoch 12, batch 1200, train_loss[loss=3.601, NarTop10Accuracy=0.6036, over 7342.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6086, over 5942.35 frames. ], batch size: 31, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (4/8) Epoch 12, batch 1300, train_loss[loss=3.826, NarTop10Accuracy=0.5516, over 5005.00 frames. ], tot_loss[loss=3.553, NarTop10Accuracy=0.6061, over 6007.03 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,980 INFO [trainer.py:765] (4/8) Epoch 12, batch 1400, train_loss[loss=3.287, NarTop10Accuracy=0.6624, over 6503.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6054, over 6030.38 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,919 INFO [trainer.py:765] (4/8) Epoch 12, batch 1500, train_loss[loss=3.624, NarTop10Accuracy=0.598, over 5860.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6058, over 5960.70 frames. ], batch size: 50, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (4/8) Epoch 12, batch 1600, train_loss[loss=3.608, NarTop10Accuracy=0.596, over 7034.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6049, over 5942.78 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,858 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (4/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,890 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29735MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,786 INFO [trainer.py:765] (4/8) Epoch 12, batch 1700, train_loss[loss=3.63, NarTop10Accuracy=0.589, over 6281.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6035, over 5921.33 frames. 
], batch size: 13, lr: 7.10e-03 +2024-08-06 09:23:41,387 INFO [trainer.py:765] (4/8) Epoch 12, batch 1800, train_loss[loss=3.343, NarTop10Accuracy=0.6539, over 7037.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6068, over 6002.86 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (4/8) Epoch 12, batch 1900, train_loss[loss=3.635, NarTop10Accuracy=0.59, over 5748.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6055, over 6022.76 frames. ], batch size: 49, lr: 7.08e-03 +2024-08-06 09:24:33,619 INFO [trainer.py:765] (4/8) Epoch 12, batch 2000, train_loss[loss=3.626, NarTop10Accuracy=0.5931, over 6273.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.604, over 5996.46 frames. ], batch size: 48, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (4/8) Epoch 12, batch 2100, train_loss[loss=3.81, NarTop10Accuracy=0.5561, over 3941.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6051, over 5973.20 frames. ], batch size: 4, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (4/8) Epoch 12, batch 2200, train_loss[loss=3.435, NarTop10Accuracy=0.6287, over 7305.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6044, over 6012.23 frames. ], batch size: 31, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (4/8) Epoch 12, batch 2300, train_loss[loss=3.686, NarTop10Accuracy=0.5701, over 5793.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6034, over 6034.81 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (4/8) Epoch 12, batch 2400, train_loss[loss=3.259, NarTop10Accuracy=0.6634, over 5197.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.6031, over 5840.30 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,154 INFO [trainer.py:765] (4/8) Epoch 12, batch 2500, train_loss[loss=3.272, NarTop10Accuracy=0.6296, over 5113.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6056, over 5515.99 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,511 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:28:03,610 INFO [trainer.py:765] (4/8) Epoch 13, batch 100, train_loss[loss=3.588, NarTop10Accuracy=0.608, over 7329.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.613, over 2366.36 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (4/8) Epoch 13, batch 200, train_loss[loss=3.211, NarTop10Accuracy=0.6793, over 6742.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6171, over 3853.22 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (4/8) Epoch 13, batch 300, train_loss[loss=3.444, NarTop10Accuracy=0.6389, over 7133.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6177, over 4655.04 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (4/8) Epoch 13, batch 400, train_loss[loss=3.401, NarTop10Accuracy=0.6449, over 5207.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6203, over 5134.53 frames. ], batch size: 7, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (4/8) Epoch 13, batch 500, train_loss[loss=3.563, NarTop10Accuracy=0.6021, over 6131.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6202, over 5410.57 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (4/8) Epoch 13, batch 600, train_loss[loss=3.738, NarTop10Accuracy=0.5808, over 5768.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6184, over 5675.42 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,820 INFO [trainer.py:765] (4/8) Epoch 13, batch 700, train_loss[loss=3.416, NarTop10Accuracy=0.6054, over 4245.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.616, over 5724.17 frames. ], batch size: 5, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (4/8) Epoch 13, batch 800, train_loss[loss=3.367, NarTop10Accuracy=0.6334, over 4956.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6159, over 5792.30 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (4/8) Epoch 13, batch 900, train_loss[loss=3.128, NarTop10Accuracy=0.6931, over 6243.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6151, over 5804.03 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (4/8) Epoch 13, batch 1000, train_loss[loss=3.699, NarTop10Accuracy=0.5751, over 6516.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6137, over 5912.69 frames. ], batch size: 14, lr: 6.63e-03 +2024-08-06 09:33:14,218 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (4/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,526 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29735MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (4/8) Epoch 13, batch 1100, train_loss[loss=3.767, NarTop10Accuracy=0.5599, over 6907.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6103, over 5952.77 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (4/8) Epoch 13, batch 1200, train_loss[loss=3.486, NarTop10Accuracy=0.6207, over 6857.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6117, over 5934.49 frames. ], batch size: 31, lr: 6.61e-03 +2024-08-06 09:35:05,084 INFO [trainer.py:765] (4/8) Epoch 13, batch 1300, train_loss[loss=3.477, NarTop10Accuracy=0.6112, over 5087.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6115, over 6017.46 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,405 INFO [trainer.py:765] (4/8) Epoch 13, batch 1400, train_loss[loss=3.486, NarTop10Accuracy=0.6292, over 6110.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6099, over 6028.47 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (4/8) Epoch 13, batch 1500, train_loss[loss=3.773, NarTop10Accuracy=0.563, over 6182.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.608, over 5971.17 frames. ], batch size: 50, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (4/8) Epoch 13, batch 1600, train_loss[loss=3.711, NarTop10Accuracy=0.5661, over 7292.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6078, over 5967.71 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,144 INFO [trainer.py:765] (4/8) Epoch 13, batch 1700, train_loss[loss=3.596, NarTop10Accuracy=0.5895, over 6179.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6081, over 5948.24 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (4/8) Epoch 13, batch 1800, train_loss[loss=3.294, NarTop10Accuracy=0.6596, over 7201.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6096, over 6013.59 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (4/8) Epoch 13, batch 1900, train_loss[loss=3.472, NarTop10Accuracy=0.628, over 6435.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6084, over 6044.50 frames. ], batch size: 49, lr: 6.54e-03 +2024-08-06 09:38:21,122 INFO [trainer.py:765] (4/8) Epoch 13, batch 2000, train_loss[loss=3.654, NarTop10Accuracy=0.5828, over 6165.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6087, over 6034.40 frames. ], batch size: 51, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (4/8) Epoch 13, batch 2100, train_loss[loss=3.403, NarTop10Accuracy=0.6409, over 4840.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6103, over 6006.07 frames. ], batch size: 5, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (4/8) Epoch 13, batch 2200, train_loss[loss=3.645, NarTop10Accuracy=0.5852, over 7484.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6088, over 6030.34 frames. ], batch size: 31, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (4/8) Epoch 13, batch 2300, train_loss[loss=3.43, NarTop10Accuracy=0.6334, over 5672.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6076, over 6069.83 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (4/8) Epoch 13, batch 2400, train_loss[loss=3.346, NarTop10Accuracy=0.6499, over 4988.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6048, over 5883.71 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (4/8) Epoch 13, batch 2500, train_loss[loss=3.271, NarTop10Accuracy=0.6596, over 5005.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6109, over 5545.68 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:49,871 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (4/8) Epoch 14, batch 100, train_loss[loss=3.43, NarTop10Accuracy=0.6306, over 7159.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6203, over 2366.91 frames. ], batch size: 30, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (4/8) Epoch 14, batch 200, train_loss[loss=3.646, NarTop10Accuracy=0.5927, over 6841.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6252, over 3861.36 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (4/8) Epoch 14, batch 300, train_loss[loss=3.843, NarTop10Accuracy=0.5426, over 7070.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6201, over 4672.02 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (4/8) Epoch 14, batch 400, train_loss[loss=3.126, NarTop10Accuracy=0.6908, over 5116.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6209, over 5134.18 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (4/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,652 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 09:43:54,212 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (4/8) Epoch 14, batch 500, train_loss[loss=3.514, NarTop10Accuracy=0.62, over 6118.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.623, over 5408.01 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (4/8) Epoch 14, batch 600, train_loss[loss=3.677, NarTop10Accuracy=0.5802, over 5887.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6222, over 5666.42 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,804 INFO [trainer.py:765] (4/8) Epoch 14, batch 700, train_loss[loss=3.727, NarTop10Accuracy=0.5679, over 5171.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6241, over 5733.60 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (4/8) Epoch 14, batch 800, train_loss[loss=3.152, NarTop10Accuracy=0.6855, over 5054.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6204, over 5800.85 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (4/8) Epoch 14, batch 900, train_loss[loss=3.816, NarTop10Accuracy=0.5583, over 6351.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6211, over 5826.05 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (4/8) Epoch 14, batch 1000, train_loss[loss=3.522, NarTop10Accuracy=0.6218, over 6239.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6192, over 5918.60 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (4/8) Epoch 14, batch 1100, train_loss[loss=3.384, NarTop10Accuracy=0.6471, over 6806.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6164, over 5947.68 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (4/8) Epoch 14, batch 1200, train_loss[loss=3.39, NarTop10Accuracy=0.6413, over 7682.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6173, over 5943.76 frames. ], batch size: 31, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (4/8) Epoch 14, batch 1300, train_loss[loss=3.414, NarTop10Accuracy=0.623, over 5143.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6181, over 6018.40 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,235 INFO [trainer.py:765] (4/8) Epoch 14, batch 1400, train_loss[loss=3.455, NarTop10Accuracy=0.6292, over 6152.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6145, over 6050.41 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (4/8) Epoch 14, batch 1500, train_loss[loss=3.608, NarTop10Accuracy=0.5991, over 6051.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6138, over 5970.33 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,638 INFO [trainer.py:765] (4/8) Epoch 14, batch 1600, train_loss[loss=3.412, NarTop10Accuracy=0.6334, over 7013.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.615, over 5947.97 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,378 INFO [trainer.py:765] (4/8) Epoch 14, batch 1700, train_loss[loss=3.197, NarTop10Accuracy=0.67, over 6227.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6168, over 5926.59 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (4/8) Epoch 14, batch 1800, train_loss[loss=3.693, NarTop10Accuracy=0.5757, over 7088.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6167, over 5992.25 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (4/8) Epoch 14, batch 1900, train_loss[loss=3.848, NarTop10Accuracy=0.5544, over 6141.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6117, over 6049.98 frames. 
], batch size: 49, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (4/8) Epoch 14, batch 2000, train_loss[loss=3.632, NarTop10Accuracy=0.6, over 6291.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.61, over 6014.11 frames. ], batch size: 48, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (4/8) Epoch 14, batch 2100, train_loss[loss=3.348, NarTop10Accuracy=0.6326, over 4006.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6127, over 6000.40 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (4/8) Epoch 14, batch 2200, train_loss[loss=3.408, NarTop10Accuracy=0.6344, over 7159.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6134, over 6028.92 frames. ], batch size: 30, lr: 6.05e-03 +2024-08-06 09:53:37,976 INFO [trainer.py:765] (4/8) Epoch 14, batch 2300, train_loss[loss=3.625, NarTop10Accuracy=0.5979, over 5773.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6099, over 6060.21 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (4/8) Epoch 14, batch 2400, train_loss[loss=3.669, NarTop10Accuracy=0.5755, over 5127.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6101, over 5861.39 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (4/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (4/8) Epoch 14, batch 2500, train_loss[loss=3.962, NarTop10Accuracy=0.5328, over 5083.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6142, over 5545.36 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,615 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (4/8) Epoch 15, batch 100, train_loss[loss=3.505, NarTop10Accuracy=0.6242, over 7039.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6265, over 2369.55 frames. ], batch size: 30, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (4/8) Epoch 15, batch 200, train_loss[loss=3.349, NarTop10Accuracy=0.6468, over 6763.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6263, over 3875.90 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,654 INFO [trainer.py:765] (4/8) Epoch 15, batch 300, train_loss[loss=3.429, NarTop10Accuracy=0.6334, over 7185.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6285, over 4664.58 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,464 INFO [trainer.py:765] (4/8) Epoch 15, batch 400, train_loss[loss=3.436, NarTop10Accuracy=0.6207, over 5630.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6292, over 5130.18 frames. ], batch size: 8, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (4/8) Epoch 15, batch 500, train_loss[loss=3.425, NarTop10Accuracy=0.6242, over 6288.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6261, over 5409.12 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (4/8) Epoch 15, batch 600, train_loss[loss=3.733, NarTop10Accuracy=0.5834, over 5690.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.624, over 5682.36 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,062 INFO [trainer.py:765] (4/8) Epoch 15, batch 700, train_loss[loss=3.335, NarTop10Accuracy=0.6493, over 5120.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6219, over 5767.73 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (4/8) Epoch 15, batch 800, train_loss[loss=3.558, NarTop10Accuracy=0.5858, over 5038.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6211, over 5830.80 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (4/8) Epoch 15, batch 900, train_loss[loss=3.36, NarTop10Accuracy=0.6424, over 6209.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6237, over 5828.61 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 10:01:05,539 INFO [trainer.py:765] (4/8) Epoch 15, batch 1000, train_loss[loss=3.354, NarTop10Accuracy=0.655, over 6320.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6241, over 5941.80 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (4/8) Epoch 15, batch 1100, train_loss[loss=3.533, NarTop10Accuracy=0.6016, over 6796.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.62, over 5980.27 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,757 INFO [trainer.py:765] (4/8) Epoch 15, batch 1200, train_loss[loss=3.776, NarTop10Accuracy=0.5613, over 7450.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6219, over 5976.79 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (4/8) Epoch 15, batch 1300, train_loss[loss=3.58, NarTop10Accuracy=0.6027, over 5035.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6205, over 6030.22 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,436 INFO [trainer.py:765] (4/8) Epoch 15, batch 1400, train_loss[loss=3.574, NarTop10Accuracy=0.5988, over 6168.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6165, over 6055.76 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,042 INFO [trainer.py:765] (4/8) Epoch 15, batch 1500, train_loss[loss=3.658, NarTop10Accuracy=0.592, over 6130.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6185, over 5986.83 frames. ], batch size: 49, lr: 5.71e-03 +2024-08-06 10:04:27,107 INFO [trainer.py:765] (4/8) Epoch 15, batch 1600, train_loss[loss=3.74, NarTop10Accuracy=0.5643, over 7287.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6209, over 5956.27 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (4/8) Epoch 15, batch 1700, train_loss[loss=3.799, NarTop10Accuracy=0.5523, over 6743.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6215, over 5937.33 frames. ], batch size: 14, lr: 5.69e-03 +2024-08-06 10:05:20,729 INFO [trainer.py:765] (4/8) Epoch 15, batch 1800, train_loss[loss=3.813, NarTop10Accuracy=0.5545, over 7312.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 6005.08 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,266 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (4/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,568 INFO [trainer.py:765] (4/8) Epoch 15, batch 1900, train_loss[loss=3.601, NarTop10Accuracy=0.6004, over 6442.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 6054.42 frames. ], batch size: 48, lr: 5.68e-03 +2024-08-06 10:06:23,371 INFO [trainer.py:765] (4/8) Epoch 15, batch 2000, train_loss[loss=3.62, NarTop10Accuracy=0.5997, over 6275.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6186, over 6039.92 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,758 INFO [trainer.py:765] (4/8) Epoch 15, batch 2100, train_loss[loss=3.335, NarTop10Accuracy=0.6571, over 3980.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6179, over 6025.37 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,171 INFO [trainer.py:765] (4/8) Epoch 15, batch 2200, train_loss[loss=3.47, NarTop10Accuracy=0.628, over 7208.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6207, over 6057.66 frames. ], batch size: 30, lr: 5.65e-03 +2024-08-06 10:07:39,628 INFO [trainer.py:765] (4/8) Epoch 15, batch 2300, train_loss[loss=3.42, NarTop10Accuracy=0.6361, over 5734.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.618, over 6070.59 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (4/8) Epoch 15, batch 2400, train_loss[loss=3.599, NarTop10Accuracy=0.5987, over 5111.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6172, over 5864.09 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,713 INFO [trainer.py:765] (4/8) Epoch 15, batch 2500, train_loss[loss=3.559, NarTop10Accuracy=0.6011, over 4880.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.619, over 5521.12 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,273 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (4/8) Epoch 16, batch 100, train_loss[loss=3.677, NarTop10Accuracy=0.5835, over 7349.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6303, over 2350.78 frames. ], batch size: 31, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (4/8) Epoch 16, batch 200, train_loss[loss=3.406, NarTop10Accuracy=0.6462, over 6749.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6311, over 3869.63 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (4/8) Epoch 16, batch 300, train_loss[loss=3.269, NarTop10Accuracy=0.6555, over 7184.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6304, over 4681.03 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,594 INFO [trainer.py:765] (4/8) Epoch 16, batch 400, train_loss[loss=3.299, NarTop10Accuracy=0.653, over 4998.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6284, over 5149.67 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,297 INFO [trainer.py:765] (4/8) Epoch 16, batch 500, train_loss[loss=3.617, NarTop10Accuracy=0.6005, over 6134.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6277, over 5409.85 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (4/8) Epoch 16, batch 600, train_loss[loss=3.552, NarTop10Accuracy=0.6206, over 5823.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6291, over 5679.56 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,950 INFO [trainer.py:765] (4/8) Epoch 16, batch 700, train_loss[loss=3.403, NarTop10Accuracy=0.637, over 5025.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6264, over 5749.88 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,285 INFO [trainer.py:765] (4/8) Epoch 16, batch 800, train_loss[loss=3.458, NarTop10Accuracy=0.6301, over 4910.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6258, over 5805.81 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,295 INFO [trainer.py:765] (4/8) Epoch 16, batch 900, train_loss[loss=3.482, NarTop10Accuracy=0.6251, over 6268.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.627, over 5837.71 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,058 INFO [trainer.py:765] (4/8) Epoch 16, batch 1000, train_loss[loss=3.775, NarTop10Accuracy=0.5588, over 6367.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6229, over 5938.83 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,509 INFO [trainer.py:765] (4/8) Epoch 16, batch 1100, train_loss[loss=3.226, NarTop10Accuracy=0.6618, over 6855.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.621, over 5949.35 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,383 INFO [trainer.py:765] (4/8) Epoch 16, batch 1200, train_loss[loss=3.6, NarTop10Accuracy=0.6037, over 7476.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6214, over 5949.84 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (4/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,042 INFO [trainer.py:765] (4/8) Epoch 16, batch 1300, train_loss[loss=3.582, NarTop10Accuracy=0.5939, over 5172.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6232, over 6027.85 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,375 INFO [trainer.py:765] (4/8) Epoch 16, batch 1400, train_loss[loss=3.515, NarTop10Accuracy=0.6117, over 6165.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6227, over 6030.95 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,354 INFO [trainer.py:765] (4/8) Epoch 16, batch 1500, train_loss[loss=3.408, NarTop10Accuracy=0.6433, over 6340.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6224, over 5977.42 frames. ], batch size: 48, lr: 5.35e-03 +2024-08-06 10:18:30,468 INFO [trainer.py:765] (4/8) Epoch 16, batch 1600, train_loss[loss=3.767, NarTop10Accuracy=0.5565, over 7170.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6203, over 5958.44 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,272 INFO [trainer.py:765] (4/8) Epoch 16, batch 1700, train_loss[loss=3.776, NarTop10Accuracy=0.5598, over 6368.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6219, over 5939.43 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,978 INFO [trainer.py:765] (4/8) Epoch 16, batch 1800, train_loss[loss=3.736, NarTop10Accuracy=0.57, over 7319.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6198, over 6000.48 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,772 INFO [trainer.py:765] (4/8) Epoch 16, batch 1900, train_loss[loss=3.61, NarTop10Accuracy=0.5927, over 6428.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6187, over 6036.37 frames. ], batch size: 51, lr: 5.32e-03 +2024-08-06 10:20:16,601 INFO [trainer.py:765] (4/8) Epoch 16, batch 2000, train_loss[loss=3.513, NarTop10Accuracy=0.6144, over 6012.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.617, over 6004.57 frames. ], batch size: 55, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (4/8) Epoch 16, batch 2100, train_loss[loss=3.405, NarTop10Accuracy=0.6408, over 3946.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.616, over 5995.76 frames. ], batch size: 4, lr: 5.31e-03 +2024-08-06 10:21:07,650 INFO [trainer.py:765] (4/8) Epoch 16, batch 2200, train_loss[loss=3.5, NarTop10Accuracy=0.6266, over 7259.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6182, over 6034.77 frames. ], batch size: 31, lr: 5.30e-03 +2024-08-06 10:21:36,081 INFO [trainer.py:765] (4/8) Epoch 16, batch 2300, train_loss[loss=3.616, NarTop10Accuracy=0.5992, over 5635.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6146, over 6068.87 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,906 INFO [trainer.py:765] (4/8) Epoch 16, batch 2400, train_loss[loss=3.244, NarTop10Accuracy=0.6688, over 5162.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6164, over 5901.30 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,289 INFO [trainer.py:765] (4/8) Epoch 16, batch 2500, train_loss[loss=3.255, NarTop10Accuracy=0.6638, over 5028.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6232, over 5551.65 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,597 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:23:45,727 INFO [trainer.py:765] (4/8) Epoch 17, batch 100, train_loss[loss=3.505, NarTop10Accuracy=0.6107, over 7408.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6351, over 2363.86 frames. ], batch size: 31, lr: 5.12e-03 +2024-08-06 10:24:19,033 INFO [trainer.py:765] (4/8) Epoch 17, batch 200, train_loss[loss=3.245, NarTop10Accuracy=0.6759, over 6917.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6336, over 3849.86 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,441 INFO [trainer.py:765] (4/8) Epoch 17, batch 300, train_loss[loss=3.633, NarTop10Accuracy=0.5952, over 7017.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6326, over 4682.69 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (4/8) Epoch 17, batch 400, train_loss[loss=3.675, NarTop10Accuracy=0.5869, over 5114.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6309, over 5136.29 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (4/8) Epoch 17, batch 500, train_loss[loss=3.377, NarTop10Accuracy=0.6389, over 6150.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6311, over 5397.47 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,756 INFO [trainer.py:765] (4/8) Epoch 17, batch 600, train_loss[loss=3.686, NarTop10Accuracy=0.5774, over 5816.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6312, over 5674.14 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,499 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (4/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,072 INFO [trainer.py:765] (4/8) Epoch 17, batch 700, train_loss[loss=3.05, NarTop10Accuracy=0.707, over 4985.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6259, over 5745.59 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (4/8) Epoch 17, batch 800, train_loss[loss=3.489, NarTop10Accuracy=0.6269, over 5094.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6277, over 5804.74 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,838 INFO [trainer.py:765] (4/8) Epoch 17, batch 900, train_loss[loss=3.122, NarTop10Accuracy=0.6888, over 6691.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6283, over 5824.67 frames. ], batch size: 14, lr: 5.07e-03 +2024-08-06 10:28:59,684 INFO [trainer.py:765] (4/8) Epoch 17, batch 1000, train_loss[loss=3.477, NarTop10Accuracy=0.6293, over 6235.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6278, over 5912.29 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (4/8) Epoch 17, batch 1100, train_loss[loss=3.097, NarTop10Accuracy=0.6877, over 6839.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6259, over 5954.28 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,242 INFO [trainer.py:765] (4/8) Epoch 17, batch 1200, train_loss[loss=3.389, NarTop10Accuracy=0.6305, over 7456.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6252, over 5963.07 frames. ], batch size: 32, lr: 5.05e-03 +2024-08-06 10:30:47,102 INFO [trainer.py:765] (4/8) Epoch 17, batch 1300, train_loss[loss=3.347, NarTop10Accuracy=0.6618, over 5064.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6246, over 6030.91 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,894 INFO [trainer.py:765] (4/8) Epoch 17, batch 1400, train_loss[loss=3.491, NarTop10Accuracy=0.6241, over 6144.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6236, over 6032.23 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (4/8) Epoch 17, batch 1500, train_loss[loss=3.549, NarTop10Accuracy=0.6183, over 6000.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6254, over 5980.07 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,401 INFO [trainer.py:765] (4/8) Epoch 17, batch 1600, train_loss[loss=3.511, NarTop10Accuracy=0.6138, over 7250.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6229, over 5958.92 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,395 INFO [trainer.py:765] (4/8) Epoch 17, batch 1700, train_loss[loss=3.687, NarTop10Accuracy=0.5795, over 6168.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6203, over 5949.50 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (4/8) Epoch 17, batch 1800, train_loss[loss=3.449, NarTop10Accuracy=0.6268, over 7241.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6208, over 6017.93 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,597 INFO [trainer.py:765] (4/8) Epoch 17, batch 1900, train_loss[loss=3.834, NarTop10Accuracy=0.5489, over 6323.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.621, over 6054.91 frames. 
], batch size: 50, lr: 5.01e-03 +2024-08-06 10:34:09,288 INFO [trainer.py:765] (4/8) Epoch 17, batch 2000, train_loss[loss=3.855, NarTop10Accuracy=0.5493, over 6101.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6216, over 6024.89 frames. ], batch size: 49, lr: 5.00e-03 +2024-08-06 10:34:34,802 INFO [trainer.py:765] (4/8) Epoch 17, batch 2100, train_loss[loss=3.503, NarTop10Accuracy=0.6093, over 4713.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6186, over 6017.10 frames. ], batch size: 5, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (4/8) Epoch 17, batch 2200, train_loss[loss=3.316, NarTop10Accuracy=0.6587, over 7468.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6218, over 6053.68 frames. ], batch size: 31, lr: 4.99e-03 +2024-08-06 10:35:25,733 INFO [trainer.py:765] (4/8) Epoch 17, batch 2300, train_loss[loss=3.7, NarTop10Accuracy=0.5817, over 5673.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6194, over 6083.42 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (4/8) Epoch 17, batch 2400, train_loss[loss=3.4, NarTop10Accuracy=0.6405, over 5102.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6188, over 5876.36 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,105 INFO [trainer.py:765] (4/8) Epoch 17, batch 2500, train_loss[loss=3.559, NarTop10Accuracy=0.6084, over 5101.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6222, over 5533.02 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,858 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:37:32,052 INFO [trainer.py:765] (4/8) Epoch 18, batch 100, train_loss[loss=3.385, NarTop10Accuracy=0.6442, over 7208.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6341, over 2369.28 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 10:37:39,163 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,086 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:37:49,684 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (4/8) Epoch 18, batch 200, train_loss[loss=3.373, NarTop10Accuracy=0.6356, over 6859.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6344, over 3859.15 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,198 INFO [trainer.py:765] (4/8) Epoch 18, batch 300, train_loss[loss=3.501, NarTop10Accuracy=0.6158, over 7285.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6357, over 4682.72 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (4/8) Epoch 18, batch 400, train_loss[loss=3.561, NarTop10Accuracy=0.6064, over 5254.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6343, over 5135.13 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,102 INFO [trainer.py:765] (4/8) Epoch 18, batch 500, train_loss[loss=3.345, NarTop10Accuracy=0.6467, over 5948.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.637, over 5393.74 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (4/8) Epoch 18, batch 600, train_loss[loss=3.443, NarTop10Accuracy=0.6311, over 5743.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.635, over 5663.26 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (4/8) Epoch 18, batch 700, train_loss[loss=3.358, NarTop10Accuracy=0.6456, over 4973.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6327, over 5728.38 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (4/8) Epoch 18, batch 800, train_loss[loss=3.268, NarTop10Accuracy=0.6587, over 5073.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6336, over 5784.28 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (4/8) Epoch 18, batch 900, train_loss[loss=3.251, NarTop10Accuracy=0.6604, over 6304.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6327, over 5812.42 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,702 INFO [trainer.py:765] (4/8) Epoch 18, batch 1000, train_loss[loss=3.3, NarTop10Accuracy=0.657, over 6260.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6301, over 5924.36 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (4/8) Epoch 18, batch 1100, train_loss[loss=3.587, NarTop10Accuracy=0.5979, over 6781.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6243, over 5952.61 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (4/8) Epoch 18, batch 1200, train_loss[loss=3.478, NarTop10Accuracy=0.6169, over 7150.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6261, over 5939.94 frames. ], batch size: 30, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (4/8) Epoch 18, batch 1300, train_loss[loss=3.144, NarTop10Accuracy=0.6845, over 5004.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.628, over 6012.94 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,237 INFO [trainer.py:765] (4/8) Epoch 18, batch 1400, train_loss[loss=3.46, NarTop10Accuracy=0.6293, over 6011.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6259, over 6028.05 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,975 INFO [trainer.py:765] (4/8) Epoch 18, batch 1500, train_loss[loss=3.857, NarTop10Accuracy=0.5414, over 6073.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6244, over 5949.26 frames. ], batch size: 48, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (4/8) Epoch 18, batch 1600, train_loss[loss=3.5, NarTop10Accuracy=0.615, over 6947.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6244, over 5954.97 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,858 INFO [trainer.py:765] (4/8) Epoch 18, batch 1700, train_loss[loss=3.541, NarTop10Accuracy=0.6076, over 6214.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6255, over 5947.97 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,437 INFO [trainer.py:765] (4/8) Epoch 18, batch 1800, train_loss[loss=3.494, NarTop10Accuracy=0.6241, over 7165.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.624, over 6012.59 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (4/8) Epoch 18, batch 1900, train_loss[loss=3.715, NarTop10Accuracy=0.581, over 6228.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6222, over 6047.19 frames. ], batch size: 48, lr: 4.73e-03 +2024-08-06 10:47:54,883 INFO [trainer.py:765] (4/8) Epoch 18, batch 2000, train_loss[loss=3.5, NarTop10Accuracy=0.615, over 6033.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6224, over 6035.10 frames. 
], batch size: 48, lr: 4.73e-03 +2024-08-06 10:48:20,369 INFO [trainer.py:765] (4/8) Epoch 18, batch 2100, train_loss[loss=3.377, NarTop10Accuracy=0.6493, over 4769.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6226, over 6018.42 frames. ], batch size: 5, lr: 4.72e-03 +2024-08-06 10:48:24,746 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (4/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:48:35,534 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,095 INFO [trainer.py:765] (4/8) Epoch 18, batch 2200, train_loss[loss=3.355, NarTop10Accuracy=0.6406, over 7021.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6234, over 6057.52 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 10:49:21,520 INFO [trainer.py:765] (4/8) Epoch 18, batch 2300, train_loss[loss=3.381, NarTop10Accuracy=0.6368, over 5820.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6219, over 6088.13 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (4/8) Epoch 18, batch 2400, train_loss[loss=3.264, NarTop10Accuracy=0.6676, over 5122.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6205, over 5896.38 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,707 INFO [trainer.py:765] (4/8) Epoch 18, batch 2500, train_loss[loss=2.976, NarTop10Accuracy=0.7255, over 4920.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6253, over 5551.12 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:30,818 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (4/8) Epoch 19, batch 100, train_loss[loss=3.274, NarTop10Accuracy=0.6596, over 7074.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.639, over 2374.46 frames. ], batch size: 30, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (4/8) Epoch 19, batch 200, train_loss[loss=3.502, NarTop10Accuracy=0.617, over 6951.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6387, over 3880.63 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (4/8) Epoch 19, batch 300, train_loss[loss=3.418, NarTop10Accuracy=0.6412, over 7092.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6384, over 4673.52 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,829 INFO [trainer.py:765] (4/8) Epoch 19, batch 400, train_loss[loss=3.338, NarTop10Accuracy=0.6526, over 5222.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6344, over 5127.85 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (4/8) Epoch 19, batch 500, train_loss[loss=3.428, NarTop10Accuracy=0.626, over 6200.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6366, over 5424.09 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,600 INFO [trainer.py:765] (4/8) Epoch 19, batch 600, train_loss[loss=3.329, NarTop10Accuracy=0.6514, over 5688.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6347, over 5682.38 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (4/8) Epoch 19, batch 700, train_loss[loss=3.484, NarTop10Accuracy=0.6243, over 5111.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6323, over 5751.29 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (4/8) Epoch 19, batch 800, train_loss[loss=3.425, NarTop10Accuracy=0.6374, over 5084.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6333, over 5795.61 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,237 INFO [trainer.py:765] (4/8) Epoch 19, batch 900, train_loss[loss=3.477, NarTop10Accuracy=0.6258, over 6144.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6327, over 5820.12 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (4/8) Epoch 19, batch 1000, train_loss[loss=3.258, NarTop10Accuracy=0.657, over 6258.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 5929.77 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,187 INFO [trainer.py:765] (4/8) Epoch 19, batch 1100, train_loss[loss=3.362, NarTop10Accuracy=0.6478, over 7012.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6303, over 5954.94 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (4/8) Epoch 19, batch 1200, train_loss[loss=3.361, NarTop10Accuracy=0.6436, over 7329.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6295, over 5953.70 frames. ], batch size: 30, lr: 4.51e-03 +2024-08-06 10:58:23,900 INFO [trainer.py:765] (4/8) Epoch 19, batch 1300, train_loss[loss=3.052, NarTop10Accuracy=0.7079, over 5030.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6285, over 6023.99 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (4/8) Epoch 19, batch 1400, train_loss[loss=3.424, NarTop10Accuracy=0.6277, over 6140.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.627, over 6054.51 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,769 INFO [trainer.py:765] (4/8) Epoch 19, batch 1500, train_loss[loss=3.69, NarTop10Accuracy=0.5818, over 6427.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6272, over 5993.08 frames. ], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,830 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (4/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,900 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (4/8) Epoch 19, batch 1600, train_loss[loss=3.672, NarTop10Accuracy=0.5746, over 7223.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6261, over 5954.87 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (4/8) Epoch 19, batch 1700, train_loss[loss=3.645, NarTop10Accuracy=0.5896, over 6305.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6252, over 5933.11 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,256 INFO [trainer.py:765] (4/8) Epoch 19, batch 1800, train_loss[loss=3.358, NarTop10Accuracy=0.6521, over 7249.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6274, over 5998.54 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (4/8) Epoch 19, batch 1900, train_loss[loss=3.532, NarTop10Accuracy=0.6139, over 6418.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6258, over 6038.07 frames. 
], batch size: 49, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (4/8) Epoch 19, batch 2000, train_loss[loss=3.502, NarTop10Accuracy=0.6197, over 6393.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6276, over 6024.90 frames. ], batch size: 49, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (4/8) Epoch 19, batch 2100, train_loss[loss=3.772, NarTop10Accuracy=0.5671, over 4000.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6261, over 6009.53 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,695 INFO [trainer.py:765] (4/8) Epoch 19, batch 2200, train_loss[loss=3.472, NarTop10Accuracy=0.6286, over 7097.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6251, over 6037.20 frames. ], batch size: 30, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (4/8) Epoch 19, batch 2300, train_loss[loss=3.215, NarTop10Accuracy=0.674, over 5748.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6251, over 6068.89 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,951 INFO [trainer.py:765] (4/8) Epoch 19, batch 2400, train_loss[loss=3.089, NarTop10Accuracy=0.6964, over 5104.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6233, over 5885.75 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (4/8) Epoch 19, batch 2500, train_loss[loss=3.401, NarTop10Accuracy=0.6241, over 5096.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6288, over 5544.99 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:24,219 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:05:26,561 INFO [trainer.py:765] (4/8) Epoch 20, batch 100, train_loss[loss=3.319, NarTop10Accuracy=0.6492, over 7010.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6391, over 2379.51 frames. ], batch size: 30, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (4/8) Epoch 20, batch 200, train_loss[loss=3.153, NarTop10Accuracy=0.6895, over 6708.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6402, over 3867.79 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (4/8) Epoch 20, batch 300, train_loss[loss=3.001, NarTop10Accuracy=0.7287, over 7291.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6396, over 4671.87 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (4/8) Epoch 20, batch 400, train_loss[loss=3.202, NarTop10Accuracy=0.6824, over 5136.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6386, over 5115.52 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (4/8) Epoch 20, batch 500, train_loss[loss=3.562, NarTop10Accuracy=0.6017, over 6257.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6372, over 5399.00 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (4/8) Epoch 20, batch 600, train_loss[loss=3.209, NarTop10Accuracy=0.6828, over 5751.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6386, over 5675.74 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,274 INFO [trainer.py:765] (4/8) Epoch 20, batch 700, train_loss[loss=3.034, NarTop10Accuracy=0.7104, over 4982.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6364, over 5748.76 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (4/8) Epoch 20, batch 800, train_loss[loss=3.547, NarTop10Accuracy=0.6075, over 5156.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.636, over 5793.84 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (4/8) Epoch 20, batch 900, train_loss[loss=3.578, NarTop10Accuracy=0.6091, over 6236.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.633, over 5828.38 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,199 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (4/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,966 INFO [trainer.py:765] (4/8) Epoch 20, batch 1000, train_loss[loss=3.141, NarTop10Accuracy=0.6968, over 6203.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6325, over 5918.72 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,023 INFO [trainer.py:765] (4/8) Epoch 20, batch 1100, train_loss[loss=3.251, NarTop10Accuracy=0.6655, over 6920.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.63, over 5938.53 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,394 INFO [trainer.py:765] (4/8) Epoch 20, batch 1200, train_loss[loss=3.421, NarTop10Accuracy=0.6332, over 7461.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6312, over 5923.01 frames. ], batch size: 31, lr: 4.28e-03 +2024-08-06 11:12:30,752 INFO [trainer.py:765] (4/8) Epoch 20, batch 1300, train_loss[loss=3.437, NarTop10Accuracy=0.6173, over 4869.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6335, over 5997.97 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,292 INFO [trainer.py:765] (4/8) Epoch 20, batch 1400, train_loss[loss=3.598, NarTop10Accuracy=0.6021, over 6223.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6301, over 6028.48 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,990 INFO [trainer.py:765] (4/8) Epoch 20, batch 1500, train_loss[loss=3.527, NarTop10Accuracy=0.6167, over 6483.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6287, over 5975.38 frames. ], batch size: 50, lr: 4.27e-03 +2024-08-06 11:14:07,052 INFO [trainer.py:765] (4/8) Epoch 20, batch 1600, train_loss[loss=3.417, NarTop10Accuracy=0.6347, over 7157.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6282, over 5962.94 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,911 INFO [trainer.py:765] (4/8) Epoch 20, batch 1700, train_loss[loss=3.714, NarTop10Accuracy=0.575, over 6391.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6295, over 5932.58 frames. ], batch size: 13, lr: 4.26e-03 +2024-08-06 11:15:00,590 INFO [trainer.py:765] (4/8) Epoch 20, batch 1800, train_loss[loss=3.369, NarTop10Accuracy=0.6426, over 7015.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6284, over 6005.49 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,277 INFO [trainer.py:765] (4/8) Epoch 20, batch 1900, train_loss[loss=3.527, NarTop10Accuracy=0.6117, over 6119.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.626, over 6037.02 frames. ], batch size: 48, lr: 4.26e-03 +2024-08-06 11:15:56,439 INFO [trainer.py:765] (4/8) Epoch 20, batch 2000, train_loss[loss=3.472, NarTop10Accuracy=0.625, over 6187.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6281, over 5999.94 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,958 INFO [trainer.py:765] (4/8) Epoch 20, batch 2100, train_loss[loss=3.082, NarTop10Accuracy=0.7008, over 4822.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.627, over 5986.40 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 11:16:47,406 INFO [trainer.py:765] (4/8) Epoch 20, batch 2200, train_loss[loss=3.439, NarTop10Accuracy=0.6265, over 7254.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6262, over 6012.79 frames. ], batch size: 30, lr: 4.24e-03 +2024-08-06 11:17:12,908 INFO [trainer.py:765] (4/8) Epoch 20, batch 2300, train_loss[loss=3.522, NarTop10Accuracy=0.6088, over 5756.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6238, over 6049.24 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,715 INFO [trainer.py:765] (4/8) Epoch 20, batch 2400, train_loss[loss=3.111, NarTop10Accuracy=0.7002, over 5126.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6229, over 5854.20 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,248 INFO [trainer.py:765] (4/8) Epoch 20, batch 2500, train_loss[loss=3.5, NarTop10Accuracy=0.6215, over 4988.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6301, over 5510.45 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,227 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:19:21,459 INFO [trainer.py:765] (4/8) Epoch 21, batch 100, train_loss[loss=3.215, NarTop10Accuracy=0.6766, over 7345.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6392, over 2363.02 frames. ], batch size: 31, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (4/8) Epoch 21, batch 200, train_loss[loss=3.379, NarTop10Accuracy=0.6442, over 6905.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6404, over 3868.31 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (4/8) Epoch 21, batch 300, train_loss[loss=3.674, NarTop10Accuracy=0.5801, over 7093.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.643, over 4678.22 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (4/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (4/8) Epoch 21, batch 400, train_loss[loss=3.474, NarTop10Accuracy=0.6155, over 5129.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6413, over 5121.26 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (4/8) Epoch 21, batch 500, train_loss[loss=3.42, NarTop10Accuracy=0.6444, over 6133.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6424, over 5404.06 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,237 INFO [trainer.py:765] (4/8) Epoch 21, batch 600, train_loss[loss=3.46, NarTop10Accuracy=0.6197, over 5708.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6407, over 5673.58 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,842 INFO [trainer.py:765] (4/8) Epoch 21, batch 700, train_loss[loss=3.37, NarTop10Accuracy=0.6559, over 5023.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6364, over 5749.71 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,075 INFO [trainer.py:765] (4/8) Epoch 21, batch 800, train_loss[loss=3.438, NarTop10Accuracy=0.6301, over 5121.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6359, over 5807.77 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (4/8) Epoch 21, batch 900, train_loss[loss=3.588, NarTop10Accuracy=0.5911, over 6160.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6354, over 5826.91 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (4/8) Epoch 21, batch 1000, train_loss[loss=3.428, NarTop10Accuracy=0.6434, over 6671.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6329, over 5934.67 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 11:25:16,428 INFO [trainer.py:765] (4/8) Epoch 21, batch 1100, train_loss[loss=3.643, NarTop10Accuracy=0.5939, over 6694.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6323, over 5960.62 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,740 INFO [trainer.py:765] (4/8) Epoch 21, batch 1200, train_loss[loss=3.395, NarTop10Accuracy=0.6476, over 7049.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6337, over 5951.43 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,056 INFO [trainer.py:765] (4/8) Epoch 21, batch 1300, train_loss[loss=3.397, NarTop10Accuracy=0.6354, over 5119.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6337, over 6006.50 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,082 INFO [trainer.py:765] (4/8) Epoch 21, batch 1400, train_loss[loss=3.233, NarTop10Accuracy=0.6806, over 6151.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6316, over 6035.43 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (4/8) Epoch 21, batch 1500, train_loss[loss=3.823, NarTop10Accuracy=0.55, over 6491.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6305, over 5960.93 frames. ], batch size: 49, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (4/8) Epoch 21, batch 1600, train_loss[loss=3.169, NarTop10Accuracy=0.6783, over 7327.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6304, over 5953.63 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (4/8) Epoch 21, batch 1700, train_loss[loss=3.517, NarTop10Accuracy=0.6078, over 6221.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6286, over 5935.79 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (4/8) Epoch 21, batch 1800, train_loss[loss=3.572, NarTop10Accuracy=0.6028, over 7073.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6286, over 5995.83 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (4/8) Epoch 21, batch 1900, train_loss[loss=3.49, NarTop10Accuracy=0.6249, over 6389.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6278, over 6041.41 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:29:49,028 INFO [trainer.py:765] (4/8) Epoch 21, batch 2000, train_loss[loss=3.451, NarTop10Accuracy=0.6319, over 5446.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6274, over 6003.01 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:30:14,529 INFO [trainer.py:765] (4/8) Epoch 21, batch 2100, train_loss[loss=3.027, NarTop10Accuracy=0.6815, over 3974.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6274, over 5997.14 frames. 
], batch size: 4, lr: 4.04e-03 +2024-08-06 11:30:39,871 INFO [trainer.py:765] (4/8) Epoch 21, batch 2200, train_loss[loss=3.828, NarTop10Accuracy=0.5447, over 7147.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6274, over 6036.42 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (4/8) Epoch 21, batch 2300, train_loss[loss=3.385, NarTop10Accuracy=0.6457, over 5721.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6272, over 6068.15 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,873 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (4/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,753 INFO [trainer.py:765] (4/8) Epoch 21, batch 2400, train_loss[loss=3.232, NarTop10Accuracy=0.667, over 5083.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6267, over 5872.38 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,056 INFO [trainer.py:765] (4/8) Epoch 21, batch 2500, train_loss[loss=3.19, NarTop10Accuracy=0.6696, over 5033.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6317, over 5531.22 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,910 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (4/8) Epoch 22, batch 100, train_loss[loss=3.593, NarTop10Accuracy=0.5944, over 7258.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6412, over 2369.51 frames. ], batch size: 30, lr: 3.93e-03 +2024-08-06 11:34:05,037 INFO [trainer.py:765] (4/8) Epoch 22, batch 200, train_loss[loss=3.274, NarTop10Accuracy=0.6625, over 6836.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6442, over 3877.04 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (4/8) Epoch 22, batch 300, train_loss[loss=3.251, NarTop10Accuracy=0.6748, over 7029.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6451, over 4667.66 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (4/8) Epoch 22, batch 400, train_loss[loss=3.283, NarTop10Accuracy=0.6655, over 5621.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6415, over 5119.23 frames. ], batch size: 8, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (4/8) Epoch 22, batch 500, train_loss[loss=3.369, NarTop10Accuracy=0.6534, over 6073.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6403, over 5390.58 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (4/8) Epoch 22, batch 600, train_loss[loss=3.172, NarTop10Accuracy=0.6918, over 5741.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6408, over 5672.09 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (4/8) Epoch 22, batch 700, train_loss[loss=3.215, NarTop10Accuracy=0.6767, over 4292.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6385, over 5736.54 frames. ], batch size: 5, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (4/8) Epoch 22, batch 800, train_loss[loss=3.083, NarTop10Accuracy=0.7084, over 5133.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6381, over 5796.90 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (4/8) Epoch 22, batch 900, train_loss[loss=3.375, NarTop10Accuracy=0.6573, over 6175.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6396, over 5827.71 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (4/8) Epoch 22, batch 1000, train_loss[loss=3.18, NarTop10Accuracy=0.6767, over 6349.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6382, over 5917.72 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (4/8) Epoch 22, batch 1100, train_loss[loss=3.273, NarTop10Accuracy=0.6573, over 6838.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6378, over 5938.33 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (4/8) Epoch 22, batch 1200, train_loss[loss=3.387, NarTop10Accuracy=0.6361, over 7392.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6359, over 5944.22 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (4/8) Epoch 22, batch 1300, train_loss[loss=3.42, NarTop10Accuracy=0.6133, over 5061.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.635, over 6011.42 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,610 INFO [trainer.py:765] (4/8) Epoch 22, batch 1400, train_loss[loss=3.441, NarTop10Accuracy=0.6277, over 6230.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6354, over 6009.29 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,585 INFO [trainer.py:765] (4/8) Epoch 22, batch 1500, train_loss[loss=3.636, NarTop10Accuracy=0.5966, over 5906.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6342, over 5951.92 frames. ], batch size: 50, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (4/8) Epoch 22, batch 1600, train_loss[loss=3.395, NarTop10Accuracy=0.6453, over 7023.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6333, over 5965.01 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,464 INFO [trainer.py:765] (4/8) Epoch 22, batch 1700, train_loss[loss=3.344, NarTop10Accuracy=0.6403, over 6295.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6311, over 5955.36 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,724 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (4/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (4/8) Epoch 22, batch 1800, train_loss[loss=3.463, NarTop10Accuracy=0.6234, over 6982.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6321, over 6007.40 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,752 INFO [trainer.py:765] (4/8) Epoch 22, batch 1900, train_loss[loss=3.61, NarTop10Accuracy=0.6003, over 6681.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6312, over 6052.33 frames. ], batch size: 49, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (4/8) Epoch 22, batch 2000, train_loss[loss=3.859, NarTop10Accuracy=0.5461, over 6157.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6315, over 6028.14 frames. 
], batch size: 48, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (4/8) Epoch 22, batch 2100, train_loss[loss=3.295, NarTop10Accuracy=0.6489, over 3868.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6323, over 6007.77 frames. ], batch size: 4, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (4/8) Epoch 22, batch 2200, train_loss[loss=3.721, NarTop10Accuracy=0.5778, over 7060.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6317, over 6053.26 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (4/8) Epoch 22, batch 2300, train_loss[loss=3.34, NarTop10Accuracy=0.6489, over 5797.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6285, over 6089.07 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (4/8) Epoch 22, batch 2400, train_loss[loss=3.244, NarTop10Accuracy=0.6542, over 5183.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6264, over 5882.10 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (4/8) Epoch 22, batch 2500, train_loss[loss=3.179, NarTop10Accuracy=0.6837, over 5076.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.632, over 5545.82 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,464 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (4/8) Epoch 23, batch 100, train_loss[loss=3.209, NarTop10Accuracy=0.6766, over 7147.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6467, over 2368.58 frames. ], batch size: 30, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (4/8) Epoch 23, batch 200, train_loss[loss=3.556, NarTop10Accuracy=0.6176, over 6901.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6497, over 3877.83 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,922 INFO [trainer.py:765] (4/8) Epoch 23, batch 300, train_loss[loss=3.264, NarTop10Accuracy=0.6546, over 7201.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6447, over 4678.91 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (4/8) Epoch 23, batch 400, train_loss[loss=3.381, NarTop10Accuracy=0.6437, over 5157.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6432, over 5111.89 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,619 INFO [trainer.py:765] (4/8) Epoch 23, batch 500, train_loss[loss=3.568, NarTop10Accuracy=0.6114, over 6250.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6435, over 5397.25 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (4/8) Epoch 23, batch 600, train_loss[loss=3.614, NarTop10Accuracy=0.6018, over 5902.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 5662.18 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,601 INFO [trainer.py:765] (4/8) Epoch 23, batch 700, train_loss[loss=3.565, NarTop10Accuracy=0.6063, over 4940.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5737.48 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,345 INFO [trainer.py:765] (4/8) Epoch 23, batch 800, train_loss[loss=3.447, NarTop10Accuracy=0.6305, over 5058.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6402, over 5778.55 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,397 INFO [trainer.py:765] (4/8) Epoch 23, batch 900, train_loss[loss=3.201, NarTop10Accuracy=0.6741, over 6719.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.642, over 5799.79 frames. 
], batch size: 14, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (4/8) Epoch 23, batch 1000, train_loss[loss=3.192, NarTop10Accuracy=0.689, over 6256.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6397, over 5918.12 frames. ], batch size: 13, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (4/8) Epoch 23, batch 1100, train_loss[loss=3.601, NarTop10Accuracy=0.5983, over 6918.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6356, over 5955.97 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (4/8) Epoch 23, batch 1200, train_loss[loss=3.56, NarTop10Accuracy=0.6143, over 7143.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6348, over 5946.55 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,825 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (4/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (4/8) Epoch 23, batch 1300, train_loss[loss=3.614, NarTop10Accuracy=0.6043, over 5004.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6336, over 6006.43 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (4/8) Epoch 23, batch 1400, train_loss[loss=3.253, NarTop10Accuracy=0.6626, over 6109.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6343, over 6039.30 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (4/8) Epoch 23, batch 1500, train_loss[loss=3.647, NarTop10Accuracy=0.5914, over 5772.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6326, over 5980.91 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,428 INFO [trainer.py:765] (4/8) Epoch 23, batch 1600, train_loss[loss=3.286, NarTop10Accuracy=0.6677, over 7159.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6335, over 5942.81 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,202 INFO [trainer.py:765] (4/8) Epoch 23, batch 1700, train_loss[loss=3.803, NarTop10Accuracy=0.5595, over 6394.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6296, over 5940.14 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,969 INFO [trainer.py:765] (4/8) Epoch 23, batch 1800, train_loss[loss=3.206, NarTop10Accuracy=0.6656, over 7181.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.633, over 6005.06 frames. ], batch size: 21, lr: 3.70e-03 +2024-08-06 11:57:23,597 INFO [trainer.py:765] (4/8) Epoch 23, batch 1900, train_loss[loss=3.457, NarTop10Accuracy=0.6337, over 5902.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6322, over 6039.92 frames. ], batch size: 51, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (4/8) Epoch 23, batch 2000, train_loss[loss=3.618, NarTop10Accuracy=0.5985, over 6315.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6323, over 6013.48 frames. ], batch size: 50, lr: 3.69e-03 +2024-08-06 11:58:14,770 INFO [trainer.py:765] (4/8) Epoch 23, batch 2100, train_loss[loss=3.767, NarTop10Accuracy=0.5583, over 3943.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6308, over 6006.74 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (4/8) Epoch 23, batch 2200, train_loss[loss=3.707, NarTop10Accuracy=0.57, over 7236.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6313, over 6040.43 frames. ], batch size: 30, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (4/8) Epoch 23, batch 2300, train_loss[loss=3.24, NarTop10Accuracy=0.6641, over 5672.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.63, over 6054.65 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (4/8) Epoch 23, batch 2400, train_loss[loss=3.054, NarTop10Accuracy=0.7108, over 5157.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6284, over 5874.27 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,011 INFO [trainer.py:765] (4/8) Epoch 23, batch 2500, train_loss[loss=3.019, NarTop10Accuracy=0.7165, over 5040.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6341, over 5538.56 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:18,736 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:01:22,111 INFO [trainer.py:765] (4/8) Epoch 24, batch 100, train_loss[loss=3.708, NarTop10Accuracy=0.577, over 7532.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6428, over 2374.44 frames. ], batch size: 31, lr: 3.59e-03 +2024-08-06 12:01:51,342 INFO [trainer.py:765] (4/8) Epoch 24, batch 200, train_loss[loss=3.588, NarTop10Accuracy=0.6052, over 7004.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6448, over 3864.09 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,513 INFO [trainer.py:765] (4/8) Epoch 24, batch 300, train_loss[loss=3.233, NarTop10Accuracy=0.6733, over 6983.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6437, over 4655.28 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,847 INFO [trainer.py:765] (4/8) Epoch 24, batch 400, train_loss[loss=3.321, NarTop10Accuracy=0.6542, over 5089.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6446, over 5122.11 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,257 INFO [trainer.py:765] (4/8) Epoch 24, batch 500, train_loss[loss=3.22, NarTop10Accuracy=0.6873, over 6057.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6459, over 5407.48 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,173 INFO [trainer.py:765] (4/8) Epoch 24, batch 600, train_loss[loss=3.445, NarTop10Accuracy=0.6291, over 5740.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6457, over 5659.71 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,531 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (4/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,775 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:04:23,311 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (4/8) Epoch 24, batch 700, train_loss[loss=3.118, NarTop10Accuracy=0.7011, over 4906.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6444, over 5739.47 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,275 INFO [trainer.py:765] (4/8) Epoch 24, batch 800, train_loss[loss=3.491, NarTop10Accuracy=0.6238, over 5101.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6422, over 5796.15 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,754 INFO [trainer.py:765] (4/8) Epoch 24, batch 900, train_loss[loss=3.621, NarTop10Accuracy=0.5855, over 6670.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6412, over 5822.04 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:06:32,812 INFO [trainer.py:765] (4/8) Epoch 24, batch 1000, train_loss[loss=3.295, NarTop10Accuracy=0.6646, over 6158.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.64, over 5909.25 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (4/8) Epoch 24, batch 1100, train_loss[loss=3.19, NarTop10Accuracy=0.6754, over 6835.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6397, over 5959.30 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,134 INFO [trainer.py:765] (4/8) Epoch 24, batch 1200, train_loss[loss=3.415, NarTop10Accuracy=0.6355, over 7454.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6365, over 5963.42 frames. ], batch size: 31, lr: 3.56e-03 +2024-08-06 12:08:20,731 INFO [trainer.py:765] (4/8) Epoch 24, batch 1300, train_loss[loss=3.26, NarTop10Accuracy=0.6617, over 5134.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6358, over 6023.56 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,065 INFO [trainer.py:765] (4/8) Epoch 24, batch 1400, train_loss[loss=2.975, NarTop10Accuracy=0.7042, over 6174.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6339, over 6034.40 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,338 INFO [trainer.py:765] (4/8) Epoch 24, batch 1500, train_loss[loss=3.567, NarTop10Accuracy=0.5998, over 6507.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6342, over 5973.80 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,525 INFO [trainer.py:765] (4/8) Epoch 24, batch 1600, train_loss[loss=3.386, NarTop10Accuracy=0.6392, over 7069.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6351, over 5952.06 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,548 INFO [trainer.py:765] (4/8) Epoch 24, batch 1700, train_loss[loss=3.52, NarTop10Accuracy=0.6137, over 6206.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6329, over 5946.12 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,274 INFO [trainer.py:765] (4/8) Epoch 24, batch 1800, train_loss[loss=3.236, NarTop10Accuracy=0.6651, over 7141.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6349, over 6015.29 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,847 INFO [trainer.py:765] (4/8) Epoch 24, batch 1900, train_loss[loss=3.439, NarTop10Accuracy=0.6243, over 6450.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6353, over 6041.71 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:11:41,666 INFO [trainer.py:765] (4/8) Epoch 24, batch 2000, train_loss[loss=3.411, NarTop10Accuracy=0.6311, over 6434.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6356, over 6023.87 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:12:07,104 INFO [trainer.py:765] (4/8) Epoch 24, batch 2100, train_loss[loss=3.053, NarTop10Accuracy=0.7046, over 4740.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6359, over 6010.29 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 12:12:33,372 INFO [trainer.py:765] (4/8) Epoch 24, batch 2200, train_loss[loss=3.439, NarTop10Accuracy=0.6308, over 7371.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6356, over 6039.74 frames. 
], batch size: 30, lr: 3.53e-03 +2024-08-06 12:12:58,772 INFO [trainer.py:765] (4/8) Epoch 24, batch 2300, train_loss[loss=3.452, NarTop10Accuracy=0.6249, over 5859.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6325, over 6068.36 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,487 INFO [trainer.py:765] (4/8) Epoch 24, batch 2400, train_loss[loss=3.276, NarTop10Accuracy=0.6659, over 5191.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6302, over 5875.96 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 12:13:47,005 INFO [trainer.py:765] (4/8) Epoch 24, batch 2500, train_loss[loss=3.428, NarTop10Accuracy=0.633, over 5107.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6368, over 5530.60 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:08,186 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:14:50,196 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (4/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,659 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (4/8) Epoch 25, batch 100, train_loss[loss=3.283, NarTop10Accuracy=0.6684, over 7380.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6494, over 2371.71 frames. ], batch size: 31, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (4/8) Epoch 25, batch 200, train_loss[loss=3.356, NarTop10Accuracy=0.6484, over 6897.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.648, over 3875.19 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,596 INFO [trainer.py:765] (4/8) Epoch 25, batch 300, train_loss[loss=3.334, NarTop10Accuracy=0.6526, over 6943.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6493, over 4673.69 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (4/8) Epoch 25, batch 400, train_loss[loss=3.254, NarTop10Accuracy=0.6717, over 5233.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 5121.41 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (4/8) Epoch 25, batch 500, train_loss[loss=3.232, NarTop10Accuracy=0.6747, over 6219.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6487, over 5397.39 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (4/8) Epoch 25, batch 600, train_loss[loss=3.212, NarTop10Accuracy=0.6809, over 5799.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6473, over 5670.91 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,598 INFO [trainer.py:765] (4/8) Epoch 25, batch 700, train_loss[loss=3.154, NarTop10Accuracy=0.6881, over 5104.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6439, over 5727.74 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,014 INFO [trainer.py:765] (4/8) Epoch 25, batch 800, train_loss[loss=3.318, NarTop10Accuracy=0.6693, over 5148.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6423, over 5786.77 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (4/8) Epoch 25, batch 900, train_loss[loss=3.159, NarTop10Accuracy=0.6873, over 6278.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 5811.87 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (4/8) Epoch 25, batch 1000, train_loss[loss=3.346, NarTop10Accuracy=0.6589, over 6337.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6421, over 5908.06 frames. ], batch size: 13, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (4/8) Epoch 25, batch 1100, train_loss[loss=3.059, NarTop10Accuracy=0.6935, over 6817.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6395, over 5943.04 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,637 INFO [trainer.py:765] (4/8) Epoch 25, batch 1200, train_loss[loss=3.575, NarTop10Accuracy=0.6033, over 7304.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6401, over 5938.41 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,838 INFO [trainer.py:765] (4/8) Epoch 25, batch 1300, train_loss[loss=3.587, NarTop10Accuracy=0.5986, over 5072.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6403, over 6000.69 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (4/8) Epoch 25, batch 1400, train_loss[loss=3.492, NarTop10Accuracy=0.6138, over 6154.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6404, over 6033.23 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (4/8) Epoch 25, batch 1500, train_loss[loss=3.852, NarTop10Accuracy=0.5408, over 5751.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6386, over 5969.53 frames. ], batch size: 49, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (4/8) Epoch 25, batch 1600, train_loss[loss=3.234, NarTop10Accuracy=0.6753, over 7339.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6373, over 5946.67 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (4/8) Epoch 25, batch 1700, train_loss[loss=3.577, NarTop10Accuracy=0.5928, over 6378.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.635, over 5934.97 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (4/8) Epoch 25, batch 1800, train_loss[loss=3.44, NarTop10Accuracy=0.6313, over 7080.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6357, over 5993.50 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (4/8) Epoch 25, batch 1900, train_loss[loss=3.562, NarTop10Accuracy=0.6069, over 6301.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.632, over 6031.02 frames. ], batch size: 51, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (4/8) Epoch 25, batch 2000, train_loss[loss=3.696, NarTop10Accuracy=0.5875, over 6388.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.636, over 6022.24 frames. ], batch size: 50, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (4/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:25:59,344 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (4/8) Epoch 25, batch 2100, train_loss[loss=3.159, NarTop10Accuracy=0.7067, over 3897.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6376, over 6013.35 frames. 
], batch size: 4, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (4/8) Epoch 25, batch 2200, train_loss[loss=3.511, NarTop10Accuracy=0.6163, over 7211.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6352, over 6046.83 frames. ], batch size: 30, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (4/8) Epoch 25, batch 2300, train_loss[loss=3.56, NarTop10Accuracy=0.6045, over 5868.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6337, over 6082.98 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,151 INFO [trainer.py:765] (4/8) Epoch 25, batch 2400, train_loss[loss=3.173, NarTop10Accuracy=0.6726, over 5262.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6338, over 5880.16 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (4/8) Epoch 25, batch 2500, train_loss[loss=3.555, NarTop10Accuracy=0.6109, over 4984.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 5535.31 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:13,063 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (4/8) Epoch 26, batch 100, train_loss[loss=3.496, NarTop10Accuracy=0.6121, over 7100.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6465, over 2363.65 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (4/8) Epoch 26, batch 200, train_loss[loss=3.267, NarTop10Accuracy=0.6667, over 6743.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6443, over 3870.08 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (4/8) Epoch 26, batch 300, train_loss[loss=3.365, NarTop10Accuracy=0.6566, over 7087.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6465, over 4679.04 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,510 INFO [trainer.py:765] (4/8) Epoch 26, batch 400, train_loss[loss=3.182, NarTop10Accuracy=0.6788, over 5878.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6468, over 5140.62 frames. ], batch size: 8, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (4/8) Epoch 26, batch 500, train_loss[loss=3.227, NarTop10Accuracy=0.6782, over 6203.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6493, over 5417.88 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (4/8) Epoch 26, batch 600, train_loss[loss=3.295, NarTop10Accuracy=0.6408, over 5809.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6465, over 5675.66 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (4/8) Epoch 26, batch 700, train_loss[loss=3.265, NarTop10Accuracy=0.6573, over 5070.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6451, over 5733.40 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (4/8) Epoch 26, batch 800, train_loss[loss=3.274, NarTop10Accuracy=0.65, over 5119.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.644, over 5790.99 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (4/8) Epoch 26, batch 900, train_loss[loss=3.625, NarTop10Accuracy=0.5913, over 6193.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6418, over 5808.06 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (4/8) Epoch 26, batch 1000, train_loss[loss=3.167, NarTop10Accuracy=0.6844, over 6738.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.642, over 5916.57 frames. 
], batch size: 14, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (4/8) Epoch 26, batch 1100, train_loss[loss=3.339, NarTop10Accuracy=0.6487, over 6808.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6422, over 5940.83 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (4/8) Epoch 26, batch 1200, train_loss[loss=3.291, NarTop10Accuracy=0.656, over 7505.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6429, over 5944.53 frames. ], batch size: 31, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (4/8) Epoch 26, batch 1300, train_loss[loss=3.376, NarTop10Accuracy=0.64, over 4233.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6421, over 5999.64 frames. ], batch size: 5, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (4/8) Epoch 26, batch 1400, train_loss[loss=3.182, NarTop10Accuracy=0.6884, over 6202.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6386, over 6018.18 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,594 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (4/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (4/8) Epoch 26, batch 1500, train_loss[loss=3.734, NarTop10Accuracy=0.5681, over 6086.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6385, over 5963.08 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,061 INFO [trainer.py:765] (4/8) Epoch 26, batch 1600, train_loss[loss=3.361, NarTop10Accuracy=0.6496, over 6950.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6391, over 5951.68 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (4/8) Epoch 26, batch 1700, train_loss[loss=3.527, NarTop10Accuracy=0.6187, over 6274.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6356, over 5953.30 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (4/8) Epoch 26, batch 1800, train_loss[loss=3.322, NarTop10Accuracy=0.6526, over 7227.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6351, over 6015.21 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (4/8) Epoch 26, batch 1900, train_loss[loss=3.73, NarTop10Accuracy=0.5684, over 6152.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.633, over 6045.89 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (4/8) Epoch 26, batch 2000, train_loss[loss=3.475, NarTop10Accuracy=0.627, over 5715.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6339, over 6012.98 frames. ], batch size: 48, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (4/8) Epoch 26, batch 2100, train_loss[loss=3.527, NarTop10Accuracy=0.6024, over 3926.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6358, over 5998.72 frames. ], batch size: 4, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (4/8) Epoch 26, batch 2200, train_loss[loss=3.437, NarTop10Accuracy=0.6309, over 7258.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6361, over 6030.60 frames. 
], batch size: 31, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (4/8) Epoch 26, batch 2300, train_loss[loss=3.262, NarTop10Accuracy=0.672, over 5842.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6357, over 6072.91 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (4/8) Epoch 26, batch 2400, train_loss[loss=3.241, NarTop10Accuracy=0.6825, over 5257.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6347, over 5870.32 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,478 INFO [trainer.py:765] (4/8) Epoch 26, batch 2500, train_loss[loss=3.158, NarTop10Accuracy=0.6884, over 5118.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6409, over 5528.10 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,437 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:43:12,533 INFO [trainer.py:765] (4/8) Epoch 27, batch 100, train_loss[loss=3.613, NarTop10Accuracy=0.5951, over 7362.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6513, over 2366.25 frames. ], batch size: 32, lr: 3.19e-03 +2024-08-06 12:43:43,576 INFO [trainer.py:765] (4/8) Epoch 27, batch 200, train_loss[loss=3.559, NarTop10Accuracy=0.6014, over 7047.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6512, over 3860.95 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (4/8) Epoch 27, batch 300, train_loss[loss=3.304, NarTop10Accuracy=0.6627, over 7084.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6511, over 4681.66 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,461 INFO [trainer.py:765] (4/8) Epoch 27, batch 400, train_loss[loss=3.348, NarTop10Accuracy=0.6529, over 5165.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6516, over 5137.99 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,670 INFO [trainer.py:765] (4/8) Epoch 27, batch 500, train_loss[loss=3.114, NarTop10Accuracy=0.6991, over 6147.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6513, over 5400.18 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,260 INFO [trainer.py:765] (4/8) Epoch 27, batch 600, train_loss[loss=3.348, NarTop10Accuracy=0.6463, over 5838.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6489, over 5676.49 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (4/8) Epoch 27, batch 700, train_loss[loss=3.506, NarTop10Accuracy=0.6256, over 5135.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6482, over 5736.44 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,016 INFO [trainer.py:765] (4/8) Epoch 27, batch 800, train_loss[loss=3.237, NarTop10Accuracy=0.6821, over 5024.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6454, over 5810.22 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,742 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (4/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,259 INFO [trainer.py:765] (4/8) Epoch 27, batch 900, train_loss[loss=3.512, NarTop10Accuracy=0.6252, over 6233.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6458, over 5835.80 frames. 
], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,862 INFO [trainer.py:765] (4/8) Epoch 27, batch 1000, train_loss[loss=3.186, NarTop10Accuracy=0.661, over 6198.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6432, over 5928.61 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,084 INFO [trainer.py:765] (4/8) Epoch 27, batch 1100, train_loss[loss=3.78, NarTop10Accuracy=0.5587, over 6942.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6426, over 5954.37 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (4/8) Epoch 27, batch 1200, train_loss[loss=3.275, NarTop10Accuracy=0.6591, over 7331.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6426, over 5952.80 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 12:50:06,241 INFO [trainer.py:765] (4/8) Epoch 27, batch 1300, train_loss[loss=3.395, NarTop10Accuracy=0.6453, over 5077.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6437, over 6015.68 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,949 INFO [trainer.py:765] (4/8) Epoch 27, batch 1400, train_loss[loss=3.306, NarTop10Accuracy=0.6567, over 6279.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6412, over 6029.08 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,277 INFO [trainer.py:765] (4/8) Epoch 27, batch 1500, train_loss[loss=3.385, NarTop10Accuracy=0.6401, over 5981.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6425, over 5973.22 frames. ], batch size: 49, lr: 3.15e-03 +2024-08-06 12:51:39,351 INFO [trainer.py:765] (4/8) Epoch 27, batch 1600, train_loss[loss=3.33, NarTop10Accuracy=0.6584, over 7122.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6408, over 5956.59 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,061 INFO [trainer.py:765] (4/8) Epoch 27, batch 1700, train_loss[loss=3.614, NarTop10Accuracy=0.6015, over 6344.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.64, over 5933.10 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,667 INFO [trainer.py:765] (4/8) Epoch 27, batch 1800, train_loss[loss=3.37, NarTop10Accuracy=0.6441, over 7250.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6415, over 6017.66 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,288 INFO [trainer.py:765] (4/8) Epoch 27, batch 1900, train_loss[loss=3.794, NarTop10Accuracy=0.5585, over 5878.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6363, over 6056.69 frames. ], batch size: 50, lr: 3.14e-03 +2024-08-06 12:53:27,997 INFO [trainer.py:765] (4/8) Epoch 27, batch 2000, train_loss[loss=3.464, NarTop10Accuracy=0.6326, over 5548.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6375, over 6011.35 frames. ], batch size: 48, lr: 3.14e-03 +2024-08-06 12:53:53,537 INFO [trainer.py:765] (4/8) Epoch 27, batch 2100, train_loss[loss=3.62, NarTop10Accuracy=0.5879, over 4714.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6381, over 6002.58 frames. ], batch size: 5, lr: 3.14e-03 +2024-08-06 12:54:18,996 INFO [trainer.py:765] (4/8) Epoch 27, batch 2200, train_loss[loss=3.272, NarTop10Accuracy=0.6609, over 6924.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6386, over 6035.52 frames. ], batch size: 30, lr: 3.14e-03 +2024-08-06 12:54:44,479 INFO [trainer.py:765] (4/8) Epoch 27, batch 2300, train_loss[loss=3.245, NarTop10Accuracy=0.6652, over 5730.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6366, over 6064.65 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,217 INFO [trainer.py:765] (4/8) Epoch 27, batch 2400, train_loss[loss=3.305, NarTop10Accuracy=0.647, over 5210.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6324, over 5861.20 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,726 INFO [trainer.py:765] (4/8) Epoch 27, batch 2500, train_loss[loss=3.304, NarTop10Accuracy=0.6615, over 5149.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6379, over 5539.61 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,626 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (4/8) Epoch 28, batch 100, train_loss[loss=3.06, NarTop10Accuracy=0.6992, over 7071.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6517, over 2391.11 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,205 INFO [trainer.py:765] (4/8) Epoch 28, batch 200, train_loss[loss=3.25, NarTop10Accuracy=0.6699, over 6726.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6511, over 3877.14 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (4/8) Epoch 28, batch 300, train_loss[loss=3.327, NarTop10Accuracy=0.6461, over 7186.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6471, over 4676.23 frames. ], batch size: 23, lr: 3.07e-03 +2024-08-06 12:57:56,458 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (4/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,829 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 12:58:07,333 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (4/8) Epoch 28, batch 400, train_loss[loss=3.162, NarTop10Accuracy=0.6755, over 5103.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.647, over 5124.59 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,438 INFO [trainer.py:765] (4/8) Epoch 28, batch 500, train_loss[loss=2.959, NarTop10Accuracy=0.7195, over 6161.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6492, over 5405.75 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,488 INFO [trainer.py:765] (4/8) Epoch 28, batch 600, train_loss[loss=3.174, NarTop10Accuracy=0.6945, over 5740.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6485, over 5658.54 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (4/8) Epoch 28, batch 700, train_loss[loss=3.348, NarTop10Accuracy=0.6476, over 4942.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6482, over 5730.86 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,434 INFO [trainer.py:765] (4/8) Epoch 28, batch 800, train_loss[loss=3.625, NarTop10Accuracy=0.5965, over 5179.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.647, over 5779.33 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,043 INFO [trainer.py:765] (4/8) Epoch 28, batch 900, train_loss[loss=3.217, NarTop10Accuracy=0.6744, over 6321.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6462, over 5812.92 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:06,495 INFO [trainer.py:765] (4/8) Epoch 28, batch 1000, train_loss[loss=3.62, NarTop10Accuracy=0.5944, over 6187.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6439, over 5920.51 frames. 
], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,230 INFO [trainer.py:765] (4/8) Epoch 28, batch 1100, train_loss[loss=3.212, NarTop10Accuracy=0.679, over 6836.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6437, over 5958.04 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,896 INFO [trainer.py:765] (4/8) Epoch 28, batch 1200, train_loss[loss=3.357, NarTop10Accuracy=0.652, over 7358.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6425, over 5949.04 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 13:03:54,154 INFO [trainer.py:765] (4/8) Epoch 28, batch 1300, train_loss[loss=3.251, NarTop10Accuracy=0.6711, over 4999.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6422, over 6006.87 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,713 INFO [trainer.py:765] (4/8) Epoch 28, batch 1400, train_loss[loss=3.579, NarTop10Accuracy=0.6021, over 6006.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6402, over 6022.57 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,350 INFO [trainer.py:765] (4/8) Epoch 28, batch 1500, train_loss[loss=3.524, NarTop10Accuracy=0.6248, over 6088.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6413, over 5964.60 frames. ], batch size: 49, lr: 3.04e-03 +2024-08-06 13:05:30,371 INFO [trainer.py:765] (4/8) Epoch 28, batch 1600, train_loss[loss=3.584, NarTop10Accuracy=0.5995, over 7152.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6409, over 5947.96 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (4/8) Epoch 28, batch 1700, train_loss[loss=3.727, NarTop10Accuracy=0.5774, over 6236.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.642, over 5935.89 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (4/8) Epoch 28, batch 1800, train_loss[loss=3.533, NarTop10Accuracy=0.5983, over 7168.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6425, over 6011.39 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (4/8) Epoch 28, batch 1900, train_loss[loss=3.533, NarTop10Accuracy=0.6186, over 6514.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6401, over 6042.16 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:16,116 INFO [trainer.py:765] (4/8) Epoch 28, batch 2000, train_loss[loss=3.431, NarTop10Accuracy=0.6319, over 5708.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6417, over 6022.55 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 13:07:41,547 INFO [trainer.py:765] (4/8) Epoch 28, batch 2100, train_loss[loss=3.571, NarTop10Accuracy=0.6078, over 4865.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6382, over 6010.35 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 13:08:06,932 INFO [trainer.py:765] (4/8) Epoch 28, batch 2200, train_loss[loss=3.676, NarTop10Accuracy=0.5874, over 7311.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6375, over 6041.36 frames. ], batch size: 31, lr: 3.02e-03 +2024-08-06 13:08:32,388 INFO [trainer.py:765] (4/8) Epoch 28, batch 2300, train_loss[loss=3.247, NarTop10Accuracy=0.6828, over 5748.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6361, over 6070.75 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,137 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (4/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,390 INFO [trainer.py:765] (4/8) Epoch 28, batch 2400, train_loss[loss=3.588, NarTop10Accuracy=0.5963, over 5040.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6323, over 5884.99 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (4/8) Epoch 28, batch 2500, train_loss[loss=3.592, NarTop10Accuracy=0.6125, over 4996.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6398, over 5542.35 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:52,030 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:10:48,193 INFO [trainer.py:765] (4/8) Epoch 29, batch 100, train_loss[loss=3.592, NarTop10Accuracy=0.5904, over 7448.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.656, over 2369.95 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 13:11:20,841 INFO [trainer.py:765] (4/8) Epoch 29, batch 200, train_loss[loss=3.45, NarTop10Accuracy=0.6377, over 6923.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6531, over 3869.78 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,950 INFO [trainer.py:765] (4/8) Epoch 29, batch 300, train_loss[loss=3.23, NarTop10Accuracy=0.6699, over 7167.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6513, over 4679.46 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (4/8) Epoch 29, batch 400, train_loss[loss=2.947, NarTop10Accuracy=0.7098, over 5111.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6523, over 5129.63 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,921 INFO [trainer.py:765] (4/8) Epoch 29, batch 500, train_loss[loss=3.342, NarTop10Accuracy=0.6539, over 6201.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6503, over 5395.92 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,547 INFO [trainer.py:765] (4/8) Epoch 29, batch 600, train_loss[loss=3.347, NarTop10Accuracy=0.648, over 5872.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.65, over 5675.95 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,937 INFO [trainer.py:765] (4/8) Epoch 29, batch 700, train_loss[loss=3.648, NarTop10Accuracy=0.5968, over 5068.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6485, over 5733.87 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,677 INFO [trainer.py:765] (4/8) Epoch 29, batch 800, train_loss[loss=3.345, NarTop10Accuracy=0.6386, over 4949.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6442, over 5787.43 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,114 INFO [trainer.py:765] (4/8) Epoch 29, batch 900, train_loss[loss=3.307, NarTop10Accuracy=0.6605, over 6152.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6462, over 5805.67 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,364 INFO [trainer.py:765] (4/8) Epoch 29, batch 1000, train_loss[loss=3.598, NarTop10Accuracy=0.6087, over 6248.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6467, over 5917.42 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,713 INFO [trainer.py:765] (4/8) Epoch 29, batch 1100, train_loss[loss=3.463, NarTop10Accuracy=0.6206, over 6800.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6439, over 5944.29 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,933 INFO [trainer.py:765] (4/8) Epoch 29, batch 1200, train_loss[loss=3.488, NarTop10Accuracy=0.6177, over 7367.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6435, over 5940.26 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 13:17:43,957 INFO [trainer.py:765] (4/8) Epoch 29, batch 1300, train_loss[loss=3.329, NarTop10Accuracy=0.6437, over 5035.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6428, over 6012.42 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,924 INFO [trainer.py:765] (4/8) Epoch 29, batch 1400, train_loss[loss=3.53, NarTop10Accuracy=0.612, over 6148.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6425, over 6026.40 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (4/8) Epoch 29, batch 1500, train_loss[loss=3.754, NarTop10Accuracy=0.5666, over 6343.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6422, over 5972.87 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:19:16,409 INFO [trainer.py:765] (4/8) Epoch 29, batch 1600, train_loss[loss=3.244, NarTop10Accuracy=0.6718, over 6981.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5949.89 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,243 INFO [trainer.py:765] (4/8) Epoch 29, batch 1700, train_loss[loss=3.151, NarTop10Accuracy=0.6889, over 6241.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 5938.63 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,091 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (4/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,109 INFO [trainer.py:765] (4/8) Epoch 29, batch 1800, train_loss[loss=3.265, NarTop10Accuracy=0.6669, over 7179.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6419, over 6011.26 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,845 INFO [trainer.py:765] (4/8) Epoch 29, batch 1900, train_loss[loss=3.452, NarTop10Accuracy=0.6338, over 6604.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6391, over 6046.08 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 13:21:12,479 INFO [trainer.py:765] (4/8) Epoch 29, batch 2000, train_loss[loss=3.646, NarTop10Accuracy=0.592, over 5752.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6395, over 6006.71 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (4/8) Epoch 29, batch 2100, train_loss[loss=3.576, NarTop10Accuracy=0.6012, over 3997.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6386, over 5991.41 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,360 INFO [trainer.py:765] (4/8) Epoch 29, batch 2200, train_loss[loss=3.335, NarTop10Accuracy=0.6566, over 6996.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6404, over 6024.92 frames. ], batch size: 30, lr: 2.92e-03 +2024-08-06 13:22:28,832 INFO [trainer.py:765] (4/8) Epoch 29, batch 2300, train_loss[loss=3.317, NarTop10Accuracy=0.6554, over 5855.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 6066.01 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,621 INFO [trainer.py:765] (4/8) Epoch 29, batch 2400, train_loss[loss=3.458, NarTop10Accuracy=0.6293, over 5155.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6368, over 5876.28 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 13:23:16,979 INFO [trainer.py:765] (4/8) Epoch 29, batch 2500, train_loss[loss=3.474, NarTop10Accuracy=0.6226, over 4840.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6434, over 5535.86 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,025 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (4/8) Epoch 30, batch 100, train_loss[loss=3.323, NarTop10Accuracy=0.6548, over 7279.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6636, over 2355.27 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (4/8) Epoch 30, batch 200, train_loss[loss=3.225, NarTop10Accuracy=0.6724, over 6956.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6602, over 3852.07 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (4/8) Epoch 30, batch 300, train_loss[loss=3.175, NarTop10Accuracy=0.6856, over 7222.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.657, over 4664.53 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,539 INFO [trainer.py:765] (4/8) Epoch 30, batch 400, train_loss[loss=3.222, NarTop10Accuracy=0.6788, over 5074.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6543, over 5131.46 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (4/8) Epoch 30, batch 500, train_loss[loss=3.24, NarTop10Accuracy=0.6702, over 6162.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6573, over 5417.09 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (4/8) Epoch 30, batch 600, train_loss[loss=3.097, NarTop10Accuracy=0.7031, over 5834.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6559, over 5673.12 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (4/8) Epoch 30, batch 700, train_loss[loss=3.236, NarTop10Accuracy=0.6553, over 5034.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6539, over 5752.38 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (4/8) Epoch 30, batch 800, train_loss[loss=3.413, NarTop10Accuracy=0.64, over 5014.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5810.83 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,425 INFO [trainer.py:765] (4/8) Epoch 30, batch 900, train_loss[loss=3.328, NarTop10Accuracy=0.6516, over 6832.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6478, over 5838.29 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 13:29:45,914 INFO [trainer.py:765] (4/8) Epoch 30, batch 1000, train_loss[loss=3.561, NarTop10Accuracy=0.6075, over 6348.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6461, over 5929.34 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,172 INFO [trainer.py:765] (4/8) Epoch 30, batch 1100, train_loss[loss=3.438, NarTop10Accuracy=0.6293, over 6880.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6449, over 5965.56 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,002 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (4/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (4/8) Epoch 30, batch 1200, train_loss[loss=3.322, NarTop10Accuracy=0.6471, over 7561.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6441, over 5964.13 frames. ], batch size: 32, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (4/8) Epoch 30, batch 1300, train_loss[loss=3.362, NarTop10Accuracy=0.6468, over 4989.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6441, over 6020.26 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (4/8) Epoch 30, batch 1400, train_loss[loss=3.585, NarTop10Accuracy=0.594, over 6000.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.641, over 6028.10 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (4/8) Epoch 30, batch 1500, train_loss[loss=3.648, NarTop10Accuracy=0.5837, over 5938.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6406, over 5951.32 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (4/8) Epoch 30, batch 1600, train_loss[loss=3.84, NarTop10Accuracy=0.5519, over 7332.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.64, over 5944.82 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,199 INFO [trainer.py:765] (4/8) Epoch 30, batch 1700, train_loss[loss=3.718, NarTop10Accuracy=0.5715, over 6245.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5921.84 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (4/8) Epoch 30, batch 1800, train_loss[loss=3.599, NarTop10Accuracy=0.5974, over 6905.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6416, over 5999.81 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,546 INFO [trainer.py:765] (4/8) Epoch 30, batch 1900, train_loss[loss=3.546, NarTop10Accuracy=0.6085, over 6005.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6388, over 6032.43 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (4/8) Epoch 30, batch 2000, train_loss[loss=3.745, NarTop10Accuracy=0.5711, over 5745.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6409, over 6014.68 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:35:31,871 INFO [trainer.py:765] (4/8) Epoch 30, batch 2100, train_loss[loss=3.401, NarTop10Accuracy=0.635, over 4747.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6391, over 6008.50 frames. ], batch size: 5, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (4/8) Epoch 30, batch 2200, train_loss[loss=3.388, NarTop10Accuracy=0.6406, over 7295.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 6044.58 frames. ], batch size: 31, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (4/8) Epoch 30, batch 2300, train_loss[loss=3.24, NarTop10Accuracy=0.6635, over 5719.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 6057.29 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (4/8) Epoch 30, batch 2400, train_loss[loss=3.245, NarTop10Accuracy=0.6749, over 5132.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.639, over 5865.67 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (4/8) Epoch 30, batch 2500, train_loss[loss=3.267, NarTop10Accuracy=0.6708, over 4934.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6455, over 5527.01 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:35,761 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (4/8) Epoch 31, batch 100, train_loss[loss=3.21, NarTop10Accuracy=0.6801, over 7432.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6594, over 2387.31 frames. ], batch size: 31, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (4/8) Epoch 31, batch 200, train_loss[loss=3.174, NarTop10Accuracy=0.6865, over 6849.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6567, over 3873.32 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (4/8) Epoch 31, batch 300, train_loss[loss=3.345, NarTop10Accuracy=0.6642, over 7273.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.658, over 4672.35 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (4/8) Epoch 31, batch 400, train_loss[loss=3.629, NarTop10Accuracy=0.5913, over 5059.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6538, over 5101.34 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (4/8) Epoch 31, batch 500, train_loss[loss=3.277, NarTop10Accuracy=0.6621, over 6149.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6546, over 5391.39 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,299 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (4/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,862 INFO [trainer.py:765] (4/8) Epoch 31, batch 600, train_loss[loss=3.147, NarTop10Accuracy=0.6871, over 5756.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6521, over 5678.95 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,259 INFO [trainer.py:765] (4/8) Epoch 31, batch 700, train_loss[loss=2.835, NarTop10Accuracy=0.7477, over 5123.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6502, over 5742.67 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (4/8) Epoch 31, batch 800, train_loss[loss=3.231, NarTop10Accuracy=0.6771, over 4980.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6489, over 5794.09 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (4/8) Epoch 31, batch 900, train_loss[loss=3.161, NarTop10Accuracy=0.6785, over 6616.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6522, over 5832.39 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (4/8) Epoch 31, batch 1000, train_loss[loss=3.455, NarTop10Accuracy=0.6242, over 6164.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6491, over 5931.83 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (4/8) Epoch 31, batch 1100, train_loss[loss=3.424, NarTop10Accuracy=0.6337, over 6827.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6483, over 5961.79 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (4/8) Epoch 31, batch 1200, train_loss[loss=3.315, NarTop10Accuracy=0.6524, over 7547.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6456, over 5980.10 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (4/8) Epoch 31, batch 1300, train_loss[loss=3.581, NarTop10Accuracy=0.6176, over 5063.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 6042.20 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,740 INFO [trainer.py:765] (4/8) Epoch 31, batch 1400, train_loss[loss=3.098, NarTop10Accuracy=0.71, over 6179.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6435, over 6037.27 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (4/8) Epoch 31, batch 1500, train_loss[loss=3.597, NarTop10Accuracy=0.5982, over 5908.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6448, over 5966.20 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 13:47:04,657 INFO [trainer.py:765] (4/8) Epoch 31, batch 1600, train_loss[loss=3.249, NarTop10Accuracy=0.6667, over 7058.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6445, over 5945.19 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,423 INFO [trainer.py:765] (4/8) Epoch 31, batch 1700, train_loss[loss=3.504, NarTop10Accuracy=0.6229, over 6221.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.645, over 5923.58 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (4/8) Epoch 31, batch 1800, train_loss[loss=3.633, NarTop10Accuracy=0.5789, over 7062.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6446, over 5975.63 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,576 INFO [trainer.py:765] (4/8) Epoch 31, batch 1900, train_loss[loss=3.564, NarTop10Accuracy=0.615, over 6506.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6442, over 6023.90 frames. ], batch size: 50, lr: 2.74e-03 +2024-08-06 13:48:50,258 INFO [trainer.py:765] (4/8) Epoch 31, batch 2000, train_loss[loss=3.67, NarTop10Accuracy=0.5769, over 5846.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6464, over 5999.28 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,764 INFO [trainer.py:765] (4/8) Epoch 31, batch 2100, train_loss[loss=3.123, NarTop10Accuracy=0.6859, over 3826.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6448, over 5988.45 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (4/8) Epoch 31, batch 2200, train_loss[loss=3.342, NarTop10Accuracy=0.6457, over 7197.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 6035.20 frames. ], batch size: 30, lr: 2.73e-03 +2024-08-06 13:50:06,708 INFO [trainer.py:765] (4/8) Epoch 31, batch 2300, train_loss[loss=3.551, NarTop10Accuracy=0.6156, over 5753.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6407, over 6055.52 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,392 INFO [trainer.py:765] (4/8) Epoch 31, batch 2400, train_loss[loss=3.143, NarTop10Accuracy=0.6891, over 5139.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6376, over 5870.47 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,891 INFO [trainer.py:765] (4/8) Epoch 31, batch 2500, train_loss[loss=3.27, NarTop10Accuracy=0.6474, over 5125.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6432, over 5543.57 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,993 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 13:51:19,069 INFO [trainer.py:811] (4/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,437 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 13:52:19,911 INFO [trainer.py:765] (4/8) Epoch 32, batch 100, train_loss[loss=3.249, NarTop10Accuracy=0.6661, over 7063.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6554, over 2383.32 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 13:52:52,539 INFO [trainer.py:765] (4/8) Epoch 32, batch 200, train_loss[loss=3.603, NarTop10Accuracy=0.6075, over 6730.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6529, over 3873.78 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,094 INFO [trainer.py:765] (4/8) Epoch 32, batch 300, train_loss[loss=3.362, NarTop10Accuracy=0.6527, over 7112.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6524, over 4685.45 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,887 INFO [trainer.py:765] (4/8) Epoch 32, batch 400, train_loss[loss=3.325, NarTop10Accuracy=0.6403, over 5063.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6516, over 5134.45 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,823 INFO [trainer.py:765] (4/8) Epoch 32, batch 500, train_loss[loss=3.277, NarTop10Accuracy=0.6657, over 6097.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.654, over 5400.88 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,773 INFO [trainer.py:765] (4/8) Epoch 32, batch 600, train_loss[loss=3.125, NarTop10Accuracy=0.6913, over 5877.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.656, over 5677.82 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,512 INFO [trainer.py:765] (4/8) Epoch 32, batch 700, train_loss[loss=3.227, NarTop10Accuracy=0.6646, over 4972.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6524, over 5743.96 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,174 INFO [trainer.py:765] (4/8) Epoch 32, batch 800, train_loss[loss=3.188, NarTop10Accuracy=0.6846, over 5072.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6525, over 5802.83 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,167 INFO [trainer.py:765] (4/8) Epoch 32, batch 900, train_loss[loss=3.668, NarTop10Accuracy=0.5907, over 6680.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6521, over 5809.58 frames. ], batch size: 14, lr: 2.67e-03 +2024-08-06 13:57:24,521 INFO [trainer.py:765] (4/8) Epoch 32, batch 1000, train_loss[loss=3.748, NarTop10Accuracy=0.5593, over 6696.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6476, over 5913.63 frames. ], batch size: 14, lr: 2.66e-03 +2024-08-06 13:57:57,453 INFO [trainer.py:765] (4/8) Epoch 32, batch 1100, train_loss[loss=3.135, NarTop10Accuracy=0.69, over 6889.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6446, over 5956.83 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,542 INFO [trainer.py:765] (4/8) Epoch 32, batch 1200, train_loss[loss=3.278, NarTop10Accuracy=0.6702, over 7479.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6459, over 5956.31 frames. 
], batch size: 31, lr: 2.66e-03 +2024-08-06 13:59:08,260 INFO [trainer.py:765] (4/8) Epoch 32, batch 1300, train_loss[loss=3.28, NarTop10Accuracy=0.6466, over 5082.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6443, over 6019.42 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,266 INFO [trainer.py:765] (4/8) Epoch 32, batch 1400, train_loss[loss=3.274, NarTop10Accuracy=0.6708, over 6181.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6451, over 6041.89 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,976 INFO [trainer.py:765] (4/8) Epoch 32, batch 1500, train_loss[loss=3.72, NarTop10Accuracy=0.5698, over 5673.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6442, over 5989.35 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,824 INFO [trainer.py:765] (4/8) Epoch 32, batch 1600, train_loss[loss=3.218, NarTop10Accuracy=0.6665, over 7104.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6441, over 5971.29 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,534 INFO [trainer.py:765] (4/8) Epoch 32, batch 1700, train_loss[loss=3.273, NarTop10Accuracy=0.6539, over 6200.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6431, over 5945.66 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 14:01:34,089 INFO [trainer.py:765] (4/8) Epoch 32, batch 1800, train_loss[loss=3.163, NarTop10Accuracy=0.6866, over 7116.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6435, over 6003.14 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (4/8) Epoch 32, batch 1900, train_loss[loss=3.515, NarTop10Accuracy=0.6221, over 6256.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6417, over 6051.16 frames. ], batch size: 48, lr: 2.65e-03 +2024-08-06 14:02:20,591 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (4/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,383 INFO [trainer.py:765] (4/8) Epoch 32, batch 2000, train_loss[loss=3.655, NarTop10Accuracy=0.5867, over 5954.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6441, over 6050.77 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 14:03:01,697 INFO [trainer.py:765] (4/8) Epoch 32, batch 2100, train_loss[loss=3.299, NarTop10Accuracy=0.6508, over 4059.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6431, over 6020.45 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 14:03:27,176 INFO [trainer.py:765] (4/8) Epoch 32, batch 2200, train_loss[loss=3.572, NarTop10Accuracy=0.6001, over 7378.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6434, over 6061.12 frames. ], batch size: 32, lr: 2.64e-03 +2024-08-06 14:03:52,585 INFO [trainer.py:765] (4/8) Epoch 32, batch 2300, train_loss[loss=3.64, NarTop10Accuracy=0.5954, over 5756.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6421, over 6075.53 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (4/8) Epoch 32, batch 2400, train_loss[loss=3.267, NarTop10Accuracy=0.6595, over 5219.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6408, over 5886.32 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (4/8) Epoch 32, batch 2500, train_loss[loss=3.401, NarTop10Accuracy=0.6435, over 4305.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6469, over 5539.45 frames. ], batch size: 5, lr: 2.64e-03 +2024-08-06 14:05:02,193 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (4/8) Epoch 33, batch 100, train_loss[loss=3.592, NarTop10Accuracy=0.6076, over 7255.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6537, over 2380.48 frames. ], batch size: 30, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (4/8) Epoch 33, batch 200, train_loss[loss=3.369, NarTop10Accuracy=0.64, over 6838.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6584, over 3898.24 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,146 INFO [trainer.py:765] (4/8) Epoch 33, batch 300, train_loss[loss=3.295, NarTop10Accuracy=0.658, over 7223.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6583, over 4687.27 frames. ], batch size: 22, lr: 2.59e-03 +2024-08-06 14:07:48,256 INFO [trainer.py:765] (4/8) Epoch 33, batch 400, train_loss[loss=3.403, NarTop10Accuracy=0.6383, over 5144.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6547, over 5141.78 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (4/8) Epoch 33, batch 500, train_loss[loss=3.175, NarTop10Accuracy=0.6834, over 6142.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6532, over 5437.84 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (4/8) Epoch 33, batch 600, train_loss[loss=3.322, NarTop10Accuracy=0.6609, over 5756.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6525, over 5688.96 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,926 INFO [trainer.py:765] (4/8) Epoch 33, batch 700, train_loss[loss=3.442, NarTop10Accuracy=0.6296, over 5019.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.652, over 5757.90 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,596 INFO [trainer.py:765] (4/8) Epoch 33, batch 800, train_loss[loss=3.21, NarTop10Accuracy=0.6738, over 5087.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6497, over 5834.42 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,387 INFO [trainer.py:765] (4/8) Epoch 33, batch 900, train_loss[loss=3.366, NarTop10Accuracy=0.6494, over 6321.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6512, over 5839.32 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,068 INFO [trainer.py:765] (4/8) Epoch 33, batch 1000, train_loss[loss=3.505, NarTop10Accuracy=0.6211, over 6056.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6495, over 5926.23 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,303 INFO [trainer.py:765] (4/8) Epoch 33, batch 1100, train_loss[loss=3.62, NarTop10Accuracy=0.5939, over 6821.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6468, over 5965.47 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (4/8) Epoch 33, batch 1200, train_loss[loss=3.494, NarTop10Accuracy=0.6309, over 7193.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6482, over 5962.27 frames. ], batch size: 30, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (4/8) Epoch 33, batch 1300, train_loss[loss=3.521, NarTop10Accuracy=0.606, over 5106.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6498, over 6022.64 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,667 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (4/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,802 INFO [trainer.py:765] (4/8) Epoch 33, batch 1400, train_loss[loss=3.296, NarTop10Accuracy=0.6667, over 6145.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6501, over 6049.40 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,244 INFO [trainer.py:765] (4/8) Epoch 33, batch 1500, train_loss[loss=3.52, NarTop10Accuracy=0.6124, over 6602.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6474, over 5978.31 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,190 INFO [trainer.py:765] (4/8) Epoch 33, batch 1600, train_loss[loss=3.356, NarTop10Accuracy=0.6539, over 7157.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6457, over 5967.12 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,856 INFO [trainer.py:765] (4/8) Epoch 33, batch 1700, train_loss[loss=3.476, NarTop10Accuracy=0.62, over 6428.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6457, over 5951.60 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,588 INFO [trainer.py:765] (4/8) Epoch 33, batch 1800, train_loss[loss=3.521, NarTop10Accuracy=0.6053, over 7128.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6484, over 6017.55 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,213 INFO [trainer.py:765] (4/8) Epoch 33, batch 1900, train_loss[loss=3.426, NarTop10Accuracy=0.6373, over 6291.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6455, over 6054.03 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 14:16:24,893 INFO [trainer.py:765] (4/8) Epoch 33, batch 2000, train_loss[loss=3.477, NarTop10Accuracy=0.6216, over 6090.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 6036.28 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:50,349 INFO [trainer.py:765] (4/8) Epoch 33, batch 2100, train_loss[loss=3.337, NarTop10Accuracy=0.6466, over 3908.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6449, over 6023.86 frames. ], batch size: 4, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (4/8) Epoch 33, batch 2200, train_loss[loss=3.583, NarTop10Accuracy=0.609, over 7173.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 6042.80 frames. ], batch size: 31, lr: 2.56e-03 +2024-08-06 14:17:41,308 INFO [trainer.py:765] (4/8) Epoch 33, batch 2300, train_loss[loss=3.341, NarTop10Accuracy=0.6552, over 5702.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 6069.96 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (4/8) Epoch 33, batch 2400, train_loss[loss=3.634, NarTop10Accuracy=0.5969, over 5043.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6414, over 5866.29 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,706 INFO [trainer.py:765] (4/8) Epoch 33, batch 2500, train_loss[loss=3.514, NarTop10Accuracy=0.615, over 5067.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.647, over 5527.18 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,650 INFO [trainer.py:650] (4/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (4/8) Epoch 34, batch 100, train_loss[loss=3.265, NarTop10Accuracy=0.667, over 7149.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6565, over 2365.94 frames. ], batch size: 30, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (4/8) Epoch 34, batch 200, train_loss[loss=3.392, NarTop10Accuracy=0.6453, over 6800.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6626, over 3861.95 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,841 INFO [trainer.py:765] (4/8) Epoch 34, batch 300, train_loss[loss=3.269, NarTop10Accuracy=0.6646, over 7187.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6577, over 4684.78 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,448 INFO [trainer.py:765] (4/8) Epoch 34, batch 400, train_loss[loss=3.18, NarTop10Accuracy=0.6819, over 5093.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6582, over 5123.38 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (4/8) Epoch 34, batch 500, train_loss[loss=3.223, NarTop10Accuracy=0.6771, over 6082.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6567, over 5405.45 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,825 INFO [trainer.py:765] (4/8) Epoch 34, batch 600, train_loss[loss=3.357, NarTop10Accuracy=0.6425, over 5812.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6555, over 5669.86 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,604 INFO [trainer.py:765] (4/8) Epoch 34, batch 700, train_loss[loss=3.357, NarTop10Accuracy=0.6538, over 4992.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.654, over 5748.22 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (4/8) Epoch 34, batch 800, train_loss[loss=3.493, NarTop10Accuracy=0.6314, over 5078.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6537, over 5794.29 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,717 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (4/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (4/8) Epoch 34, batch 900, train_loss[loss=3.157, NarTop10Accuracy=0.6762, over 6225.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6526, over 5828.06 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (4/8) Epoch 34, batch 1000, train_loss[loss=3.43, NarTop10Accuracy=0.6318, over 6724.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6504, over 5933.45 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 14:25:37,997 INFO [trainer.py:765] (4/8) Epoch 34, batch 1100, train_loss[loss=3.402, NarTop10Accuracy=0.6358, over 6737.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6484, over 5955.88 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (4/8) Epoch 34, batch 1200, train_loss[loss=3.508, NarTop10Accuracy=0.6182, over 7228.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6498, over 5957.27 frames. 
], batch size: 30, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (4/8) Epoch 34, batch 1300, train_loss[loss=3.512, NarTop10Accuracy=0.6219, over 5045.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6505, over 6016.23 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (4/8) Epoch 34, batch 1400, train_loss[loss=3.145, NarTop10Accuracy=0.6813, over 6198.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 6020.59 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (4/8) Epoch 34, batch 1500, train_loss[loss=3.737, NarTop10Accuracy=0.5727, over 5748.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6509, over 5963.74 frames. ], batch size: 48, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (4/8) Epoch 34, batch 1600, train_loss[loss=3.278, NarTop10Accuracy=0.6658, over 7154.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.648, over 5946.99 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,383 INFO [trainer.py:765] (4/8) Epoch 34, batch 1700, train_loss[loss=3.466, NarTop10Accuracy=0.6215, over 6738.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6443, over 5939.00 frames. ], batch size: 14, lr: 2.49e-03 +2024-08-06 14:29:14,010 INFO [trainer.py:765] (4/8) Epoch 34, batch 1800, train_loss[loss=3.624, NarTop10Accuracy=0.5831, over 7164.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6467, over 5991.58 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (4/8) Epoch 34, batch 1900, train_loss[loss=3.57, NarTop10Accuracy=0.6196, over 6184.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6439, over 6051.41 frames. ], batch size: 51, lr: 2.49e-03 +2024-08-06 14:30:09,516 INFO [trainer.py:765] (4/8) Epoch 34, batch 2000, train_loss[loss=3.564, NarTop10Accuracy=0.6085, over 5708.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6435, over 6022.96 frames. ], batch size: 48, lr: 2.49e-03 +2024-08-06 14:30:35,015 INFO [trainer.py:765] (4/8) Epoch 34, batch 2100, train_loss[loss=3.319, NarTop10Accuracy=0.6492, over 4885.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.644, over 5994.46 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (4/8) Epoch 34, batch 2200, train_loss[loss=3.564, NarTop10Accuracy=0.6201, over 7030.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6437, over 6034.66 frames. ], batch size: 30, lr: 2.49e-03 +2024-08-06 14:31:25,979 INFO [trainer.py:765] (4/8) Epoch 34, batch 2300, train_loss[loss=3.274, NarTop10Accuracy=0.6628, over 5794.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6415, over 6056.88 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (4/8) Epoch 34, batch 2400, train_loss[loss=3.219, NarTop10Accuracy=0.6753, over 5228.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6409, over 5870.05 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (4/8) Epoch 34, batch 2500, train_loss[loss=3.074, NarTop10Accuracy=0.6995, over 5130.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6463, over 5538.11 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,252 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 14:33:26,335 INFO [trainer.py:765] (4/8) Epoch 35, batch 100, train_loss[loss=3.37, NarTop10Accuracy=0.6479, over 7333.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6588, over 2370.02 frames. 
], batch size: 30, lr: 2.44e-03 +2024-08-06 14:34:03,580 INFO [trainer.py:765] (4/8) Epoch 35, batch 200, train_loss[loss=3.351, NarTop10Accuracy=0.6432, over 7044.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6593, over 3882.30 frames. ], batch size: 18, lr: 2.44e-03 +2024-08-06 14:34:13,184 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (4/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,664 INFO [trainer.py:765] (4/8) Epoch 35, batch 300, train_loss[loss=3.431, NarTop10Accuracy=0.625, over 7650.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6599, over 4681.29 frames. ], batch size: 23, lr: 2.44e-03 +2024-08-06 14:35:13,542 INFO [trainer.py:765] (4/8) Epoch 35, batch 400, train_loss[loss=3.349, NarTop10Accuracy=0.6564, over 5117.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6564, over 5126.52 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,187 INFO [trainer.py:765] (4/8) Epoch 35, batch 500, train_loss[loss=3.622, NarTop10Accuracy=0.5926, over 6047.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6568, over 5411.29 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,746 INFO [trainer.py:765] (4/8) Epoch 35, batch 600, train_loss[loss=3.114, NarTop10Accuracy=0.6956, over 5681.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6568, over 5666.75 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,826 INFO [trainer.py:765] (4/8) Epoch 35, batch 700, train_loss[loss=3.456, NarTop10Accuracy=0.6425, over 5026.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6565, over 5727.91 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,768 INFO [trainer.py:765] (4/8) Epoch 35, batch 800, train_loss[loss=3.246, NarTop10Accuracy=0.6716, over 5034.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6543, over 5787.35 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,303 INFO [trainer.py:765] (4/8) Epoch 35, batch 900, train_loss[loss=3.328, NarTop10Accuracy=0.6595, over 6239.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6522, over 5802.44 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:38:43,708 INFO [trainer.py:765] (4/8) Epoch 35, batch 1000, train_loss[loss=3.346, NarTop10Accuracy=0.6434, over 6679.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6531, over 5906.34 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 14:39:16,567 INFO [trainer.py:765] (4/8) Epoch 35, batch 1100, train_loss[loss=3.619, NarTop10Accuracy=0.5914, over 6925.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6521, over 5958.48 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,837 INFO [trainer.py:765] (4/8) Epoch 35, batch 1200, train_loss[loss=3.165, NarTop10Accuracy=0.6809, over 7209.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6522, over 5944.59 frames. ], batch size: 30, lr: 2.43e-03 +2024-08-06 14:40:33,953 INFO [trainer.py:765] (4/8) Epoch 35, batch 1300, train_loss[loss=2.928, NarTop10Accuracy=0.7328, over 5046.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6522, over 6010.24 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,184 INFO [trainer.py:765] (4/8) Epoch 35, batch 1400, train_loss[loss=3.488, NarTop10Accuracy=0.6204, over 6119.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6486, over 6035.90 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,824 INFO [trainer.py:765] (4/8) Epoch 35, batch 1500, train_loss[loss=3.524, NarTop10Accuracy=0.6175, over 6323.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6483, over 5971.42 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (4/8) Epoch 35, batch 1600, train_loss[loss=3.545, NarTop10Accuracy=0.6017, over 7084.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 5953.03 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,466 INFO [trainer.py:765] (4/8) Epoch 35, batch 1700, train_loss[loss=3.261, NarTop10Accuracy=0.6616, over 6353.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6475, over 5938.19 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (4/8) Epoch 35, batch 1800, train_loss[loss=3.38, NarTop10Accuracy=0.6411, over 7183.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6458, over 6008.69 frames. ], batch size: 23, lr: 2.42e-03 +2024-08-06 14:43:21,646 INFO [trainer.py:765] (4/8) Epoch 35, batch 1900, train_loss[loss=3.432, NarTop10Accuracy=0.6304, over 6402.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6465, over 6054.04 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:43:47,367 INFO [trainer.py:765] (4/8) Epoch 35, batch 2000, train_loss[loss=3.626, NarTop10Accuracy=0.5926, over 5690.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6467, over 6022.71 frames. ], batch size: 52, lr: 2.42e-03 +2024-08-06 14:44:12,858 INFO [trainer.py:765] (4/8) Epoch 35, batch 2100, train_loss[loss=3.382, NarTop10Accuracy=0.6364, over 3793.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.645, over 6009.81 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,389 INFO [trainer.py:765] (4/8) Epoch 35, batch 2200, train_loss[loss=3.495, NarTop10Accuracy=0.6234, over 7498.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6451, over 6052.25 frames. ], batch size: 31, lr: 2.42e-03 +2024-08-06 14:44:47,201 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (4/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:44:57,974 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,100 INFO [trainer.py:765] (4/8) Epoch 35, batch 2300, train_loss[loss=3.007, NarTop10Accuracy=0.6956, over 5671.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6434, over 6069.08 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,819 INFO [trainer.py:765] (4/8) Epoch 35, batch 2400, train_loss[loss=3.182, NarTop10Accuracy=0.6801, over 5085.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5882.34 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 14:46:02,147 INFO [trainer.py:765] (4/8) Epoch 35, batch 2500, train_loss[loss=3.43, NarTop10Accuracy=0.635, over 5008.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6492, over 5540.20 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,025 INFO [trainer.py:650] (4/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (4/8) Epoch 36, batch 100, train_loss[loss=3.272, NarTop10Accuracy=0.6648, over 7015.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6563, over 2380.83 frames. ], batch size: 30, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (4/8) Epoch 36, batch 200, train_loss[loss=3.181, NarTop10Accuracy=0.6879, over 6920.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6588, over 3872.48 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (4/8) Epoch 36, batch 300, train_loss[loss=3.209, NarTop10Accuracy=0.6853, over 7152.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6606, over 4670.66 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,815 INFO [trainer.py:765] (4/8) Epoch 36, batch 400, train_loss[loss=3.082, NarTop10Accuracy=0.706, over 5865.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6607, over 5125.21 frames. ], batch size: 8, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (4/8) Epoch 36, batch 500, train_loss[loss=3.368, NarTop10Accuracy=0.6508, over 6086.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6602, over 5393.55 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (4/8) Epoch 36, batch 600, train_loss[loss=3.221, NarTop10Accuracy=0.6771, over 5778.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6579, over 5669.64 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,514 INFO [trainer.py:765] (4/8) Epoch 36, batch 700, train_loss[loss=3.076, NarTop10Accuracy=0.6961, over 5035.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6555, over 5736.11 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (4/8) Epoch 36, batch 800, train_loss[loss=3.478, NarTop10Accuracy=0.6203, over 4903.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.654, over 5800.45 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (4/8) Epoch 36, batch 900, train_loss[loss=3.126, NarTop10Accuracy=0.7033, over 6278.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6565, over 5817.86 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (4/8) Epoch 36, batch 1000, train_loss[loss=3.242, NarTop10Accuracy=0.6541, over 6238.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6549, over 5921.03 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (4/8) Epoch 36, batch 1100, train_loss[loss=3.285, NarTop10Accuracy=0.6687, over 6820.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6519, over 5958.22 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (4/8) Epoch 36, batch 1200, train_loss[loss=3.267, NarTop10Accuracy=0.6638, over 7244.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6549, over 5960.22 frames. ], batch size: 30, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (4/8) Epoch 36, batch 1300, train_loss[loss=2.982, NarTop10Accuracy=0.7252, over 5604.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6528, over 6016.65 frames. ], batch size: 7, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (4/8) Epoch 36, batch 1400, train_loss[loss=3.262, NarTop10Accuracy=0.6697, over 6032.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6517, over 6041.59 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (4/8) Epoch 36, batch 1500, train_loss[loss=3.477, NarTop10Accuracy=0.6229, over 6196.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6499, over 5977.41 frames. ], batch size: 48, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (4/8) Epoch 36, batch 1600, train_loss[loss=3.15, NarTop10Accuracy=0.6862, over 7231.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6512, over 5972.43 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,132 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (4/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,178 INFO [trainer.py:765] (4/8) Epoch 36, batch 1700, train_loss[loss=3.092, NarTop10Accuracy=0.6964, over 6682.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6505, over 5951.20 frames. ], batch size: 14, lr: 2.35e-03 +2024-08-06 14:56:53,758 INFO [trainer.py:765] (4/8) Epoch 36, batch 1800, train_loss[loss=3.324, NarTop10Accuracy=0.6506, over 7086.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6488, over 6004.39 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,335 INFO [trainer.py:765] (4/8) Epoch 36, batch 1900, train_loss[loss=3.462, NarTop10Accuracy=0.6333, over 5579.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 6046.92 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:57:46,056 INFO [trainer.py:765] (4/8) Epoch 36, batch 2000, train_loss[loss=3.723, NarTop10Accuracy=0.5746, over 6151.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6457, over 6019.87 frames. ], batch size: 51, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (4/8) Epoch 36, batch 2100, train_loss[loss=2.964, NarTop10Accuracy=0.7221, over 4820.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.646, over 5995.97 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 14:58:36,832 INFO [trainer.py:765] (4/8) Epoch 36, batch 2200, train_loss[loss=3.58, NarTop10Accuracy=0.6049, over 7352.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6451, over 6033.80 frames. ], batch size: 31, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (4/8) Epoch 36, batch 2300, train_loss[loss=3.244, NarTop10Accuracy=0.6726, over 5806.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6436, over 6081.17 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (4/8) Epoch 36, batch 2400, train_loss[loss=3.562, NarTop10Accuracy=0.6089, over 5017.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6418, over 5897.04 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (4/8) Epoch 36, batch 2500, train_loss[loss=3.516, NarTop10Accuracy=0.6186, over 5152.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6477, over 5541.67 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,883 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (4/8) Epoch 37, batch 100, train_loss[loss=3.342, NarTop10Accuracy=0.6582, over 7120.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6633, over 2372.52 frames. 
], batch size: 30, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (4/8) Epoch 37, batch 200, train_loss[loss=3.111, NarTop10Accuracy=0.6946, over 6901.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.662, over 3872.17 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,383 INFO [trainer.py:765] (4/8) Epoch 37, batch 300, train_loss[loss=3.292, NarTop10Accuracy=0.6643, over 6887.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6619, over 4674.67 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (4/8) Epoch 37, batch 400, train_loss[loss=3.411, NarTop10Accuracy=0.6329, over 5728.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6602, over 5144.93 frames. ], batch size: 8, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (4/8) Epoch 37, batch 500, train_loss[loss=3.373, NarTop10Accuracy=0.65, over 6117.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6584, over 5406.50 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (4/8) Epoch 37, batch 600, train_loss[loss=3.143, NarTop10Accuracy=0.7007, over 5734.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6591, over 5682.72 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,248 INFO [trainer.py:765] (4/8) Epoch 37, batch 700, train_loss[loss=3.227, NarTop10Accuracy=0.6737, over 5083.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6578, over 5747.14 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (4/8) Epoch 37, batch 800, train_loss[loss=3.406, NarTop10Accuracy=0.6328, over 5025.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6566, over 5805.40 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (4/8) Epoch 37, batch 900, train_loss[loss=3.195, NarTop10Accuracy=0.6784, over 6241.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6545, over 5816.21 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (4/8) Epoch 37, batch 1000, train_loss[loss=3.255, NarTop10Accuracy=0.6708, over 6280.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6526, over 5923.56 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (4/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (4/8) Epoch 37, batch 1100, train_loss[loss=3.543, NarTop10Accuracy=0.6115, over 6955.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6528, over 5970.46 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (4/8) Epoch 37, batch 1200, train_loss[loss=3.268, NarTop10Accuracy=0.6635, over 7552.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6532, over 5963.47 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (4/8) Epoch 37, batch 1300, train_loss[loss=3.376, NarTop10Accuracy=0.6441, over 5065.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6509, over 6017.99 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (4/8) Epoch 37, batch 1400, train_loss[loss=3.076, NarTop10Accuracy=0.6941, over 6309.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6488, over 6046.77 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (4/8) Epoch 37, batch 1500, train_loss[loss=3.488, NarTop10Accuracy=0.6163, over 6209.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 5986.22 frames. ], batch size: 50, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (4/8) Epoch 37, batch 1600, train_loss[loss=3.402, NarTop10Accuracy=0.6485, over 7166.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6499, over 5956.63 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (4/8) Epoch 37, batch 1700, train_loss[loss=3.177, NarTop10Accuracy=0.6893, over 6340.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6499, over 5951.05 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (4/8) Epoch 37, batch 1800, train_loss[loss=3.491, NarTop10Accuracy=0.6202, over 7157.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6502, over 6001.89 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (4/8) Epoch 37, batch 1900, train_loss[loss=3.412, NarTop10Accuracy=0.6409, over 6055.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6479, over 6046.53 frames. ], batch size: 48, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (4/8) Epoch 37, batch 2000, train_loss[loss=3.602, NarTop10Accuracy=0.6019, over 6353.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6482, over 6016.63 frames. ], batch size: 52, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (4/8) Epoch 37, batch 2100, train_loss[loss=3.414, NarTop10Accuracy=0.6348, over 4756.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 6014.96 frames. ], batch size: 5, lr: 2.29e-03 +2024-08-06 15:12:24,312 INFO [trainer.py:765] (4/8) Epoch 37, batch 2200, train_loss[loss=3.265, NarTop10Accuracy=0.6589, over 7511.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6488, over 6055.52 frames. ], batch size: 31, lr: 2.28e-03 +2024-08-06 15:12:49,787 INFO [trainer.py:765] (4/8) Epoch 37, batch 2300, train_loss[loss=3.221, NarTop10Accuracy=0.6719, over 5673.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6495, over 6060.51 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (4/8) Epoch 37, batch 2400, train_loss[loss=3.07, NarTop10Accuracy=0.701, over 5124.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6473, over 5879.65 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (4/8) Epoch 37, batch 2500, train_loss[loss=3.589, NarTop10Accuracy=0.6079, over 5114.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6524, over 5514.14 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,093 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (4/8) Epoch 38, batch 100, train_loss[loss=3.542, NarTop10Accuracy=0.6198, over 7323.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6577, over 2376.77 frames. ], batch size: 30, lr: 2.25e-03 +2024-08-06 15:15:27,289 INFO [trainer.py:765] (4/8) Epoch 38, batch 200, train_loss[loss=3.26, NarTop10Accuracy=0.6724, over 6883.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6634, over 3883.83 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,280 INFO [trainer.py:765] (4/8) Epoch 38, batch 300, train_loss[loss=3.257, NarTop10Accuracy=0.6653, over 7193.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6648, over 4676.15 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,594 INFO [trainer.py:765] (4/8) Epoch 38, batch 400, train_loss[loss=3.393, NarTop10Accuracy=0.6405, over 5236.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6629, over 5136.02 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,257 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (4/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 15:17:14,631 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,480 INFO [trainer.py:765] (4/8) Epoch 38, batch 500, train_loss[loss=3.399, NarTop10Accuracy=0.6433, over 6131.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6635, over 5409.48 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (4/8) Epoch 38, batch 600, train_loss[loss=3.203, NarTop10Accuracy=0.6813, over 5727.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6596, over 5675.24 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (4/8) Epoch 38, batch 700, train_loss[loss=3.328, NarTop10Accuracy=0.6434, over 5074.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6576, over 5734.17 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (4/8) Epoch 38, batch 800, train_loss[loss=2.926, NarTop10Accuracy=0.7278, over 5071.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6573, over 5806.60 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,539 INFO [trainer.py:765] (4/8) Epoch 38, batch 900, train_loss[loss=3.574, NarTop10Accuracy=0.6011, over 6374.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6574, over 5823.59 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (4/8) Epoch 38, batch 1000, train_loss[loss=3.292, NarTop10Accuracy=0.6599, over 6242.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6545, over 5904.22 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (4/8) Epoch 38, batch 1100, train_loss[loss=3.197, NarTop10Accuracy=0.6819, over 6851.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6527, over 5949.58 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,595 INFO [trainer.py:765] (4/8) Epoch 38, batch 1200, train_loss[loss=3.422, NarTop10Accuracy=0.6321, over 7415.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6491, over 5963.53 frames. ], batch size: 31, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (4/8) Epoch 38, batch 1300, train_loss[loss=3.265, NarTop10Accuracy=0.6625, over 5071.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6516, over 6014.12 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (4/8) Epoch 38, batch 1400, train_loss[loss=3.056, NarTop10Accuracy=0.7089, over 6195.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6492, over 6027.34 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (4/8) Epoch 38, batch 1500, train_loss[loss=3.32, NarTop10Accuracy=0.6593, over 5470.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.647, over 5956.97 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (4/8) Epoch 38, batch 1600, train_loss[loss=3.348, NarTop10Accuracy=0.6545, over 7300.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6469, over 5939.35 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (4/8) Epoch 38, batch 1700, train_loss[loss=3.24, NarTop10Accuracy=0.6678, over 6503.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6464, over 5931.89 frames. ], batch size: 14, lr: 2.23e-03 +2024-08-06 15:24:28,064 INFO [trainer.py:765] (4/8) Epoch 38, batch 1800, train_loss[loss=3.139, NarTop10Accuracy=0.6844, over 7142.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6456, over 6002.61 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,672 INFO [trainer.py:765] (4/8) Epoch 38, batch 1900, train_loss[loss=3.317, NarTop10Accuracy=0.6613, over 6378.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6449, over 6054.38 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (4/8) Epoch 38, batch 2000, train_loss[loss=3.559, NarTop10Accuracy=0.6095, over 6341.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 6039.51 frames. ], batch size: 51, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (4/8) Epoch 38, batch 2100, train_loss[loss=3.085, NarTop10Accuracy=0.6942, over 4030.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6476, over 6012.62 frames. ], batch size: 4, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (4/8) Epoch 38, batch 2200, train_loss[loss=3.62, NarTop10Accuracy=0.6025, over 7313.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6488, over 6031.60 frames. ], batch size: 31, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (4/8) Epoch 38, batch 2300, train_loss[loss=3.167, NarTop10Accuracy=0.6855, over 5801.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 6062.11 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (4/8) Epoch 38, batch 2400, train_loss[loss=3.385, NarTop10Accuracy=0.6437, over 5088.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6467, over 5884.79 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,145 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:27:33,590 INFO [trainer.py:811] (4/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 15:27:34,076 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,515 INFO [trainer.py:765] (4/8) Epoch 38, batch 2500, train_loss[loss=3.057, NarTop10Accuracy=0.7093, over 5241.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6526, over 5554.49 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,581 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (4/8) Epoch 39, batch 100, train_loss[loss=3.251, NarTop10Accuracy=0.6636, over 7087.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6672, over 2380.92 frames. 
], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,051 INFO [trainer.py:765] (4/8) Epoch 39, batch 200, train_loss[loss=3.346, NarTop10Accuracy=0.6455, over 6979.00 frames. ], tot_loss[loss=3.26, NarTop10Accuracy=0.6677, over 3877.15 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,017 INFO [trainer.py:765] (4/8) Epoch 39, batch 300, train_loss[loss=3.056, NarTop10Accuracy=0.6994, over 7303.00 frames. ], tot_loss[loss=3.268, NarTop10Accuracy=0.6654, over 4679.25 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (4/8) Epoch 39, batch 400, train_loss[loss=3.135, NarTop10Accuracy=0.69, over 5072.00 frames. ], tot_loss[loss=3.275, NarTop10Accuracy=0.6634, over 5121.55 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (4/8) Epoch 39, batch 500, train_loss[loss=3.029, NarTop10Accuracy=0.7156, over 6168.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6615, over 5394.86 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (4/8) Epoch 39, batch 600, train_loss[loss=3.261, NarTop10Accuracy=0.6753, over 5758.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6606, over 5659.27 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (4/8) Epoch 39, batch 700, train_loss[loss=2.938, NarTop10Accuracy=0.7313, over 5206.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6581, over 5724.63 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,165 INFO [trainer.py:765] (4/8) Epoch 39, batch 800, train_loss[loss=3.055, NarTop10Accuracy=0.7003, over 4947.00 frames. ], tot_loss[loss=3.303, NarTop10Accuracy=0.6582, over 5796.10 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,116 INFO [trainer.py:765] (4/8) Epoch 39, batch 900, train_loss[loss=3.26, NarTop10Accuracy=0.6564, over 6727.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6562, over 5817.83 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (4/8) Epoch 39, batch 1000, train_loss[loss=3.092, NarTop10Accuracy=0.6944, over 6681.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6563, over 5917.94 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 15:34:33,094 INFO [trainer.py:765] (4/8) Epoch 39, batch 1100, train_loss[loss=3.113, NarTop10Accuracy=0.7018, over 6786.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.655, over 5952.65 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (4/8) Epoch 39, batch 1200, train_loss[loss=3.298, NarTop10Accuracy=0.6625, over 7355.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.655, over 5947.53 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (4/8) Epoch 39, batch 1300, train_loss[loss=3.543, NarTop10Accuracy=0.6089, over 5027.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6561, over 6030.14 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (4/8) Epoch 39, batch 1400, train_loss[loss=3.331, NarTop10Accuracy=0.6602, over 6237.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6545, over 6039.00 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,213 INFO [trainer.py:765] (4/8) Epoch 39, batch 1500, train_loss[loss=3.462, NarTop10Accuracy=0.6294, over 6756.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6529, over 5980.61 frames. 
], batch size: 49, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (4/8) Epoch 39, batch 1600, train_loss[loss=3.304, NarTop10Accuracy=0.6704, over 7070.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6512, over 5970.42 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,882 INFO [trainer.py:765] (4/8) Epoch 39, batch 1700, train_loss[loss=3.127, NarTop10Accuracy=0.7002, over 6270.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6506, over 5949.54 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 15:38:08,509 INFO [trainer.py:765] (4/8) Epoch 39, batch 1800, train_loss[loss=3.128, NarTop10Accuracy=0.6859, over 7076.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6502, over 6009.29 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (4/8) Epoch 39, batch 1900, train_loss[loss=3.43, NarTop10Accuracy=0.6357, over 5542.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6471, over 6045.70 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:38:37,989 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (4/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,262 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,226 INFO [trainer.py:765] (4/8) Epoch 39, batch 2000, train_loss[loss=3.318, NarTop10Accuracy=0.6577, over 6007.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 6021.35 frames. ], batch size: 50, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (4/8) Epoch 39, batch 2100, train_loss[loss=3.807, NarTop10Accuracy=0.5535, over 4067.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.65, over 5989.00 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (4/8) Epoch 39, batch 2200, train_loss[loss=3.563, NarTop10Accuracy=0.6035, over 7303.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6501, over 6032.86 frames. ], batch size: 30, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (4/8) Epoch 39, batch 2300, train_loss[loss=3.011, NarTop10Accuracy=0.7124, over 5790.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6474, over 6059.96 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (4/8) Epoch 39, batch 2400, train_loss[loss=3.319, NarTop10Accuracy=0.6497, over 5129.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6473, over 5865.32 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (4/8) Epoch 39, batch 2500, train_loss[loss=3.195, NarTop10Accuracy=0.6702, over 5083.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6527, over 5523.88 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:37,173 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (4/8) Epoch 40, batch 100, train_loss[loss=3.415, NarTop10Accuracy=0.6293, over 6990.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6561, over 2371.77 frames. ], batch size: 30, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (4/8) Epoch 40, batch 200, train_loss[loss=3.495, NarTop10Accuracy=0.6209, over 6840.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.663, over 3865.63 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (4/8) Epoch 40, batch 300, train_loss[loss=3.418, NarTop10Accuracy=0.6299, over 6970.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6613, over 4666.74 frames. ], batch size: 21, lr: 2.13e-03 +2024-08-06 15:44:18,202 INFO [trainer.py:765] (4/8) Epoch 40, batch 400, train_loss[loss=3.029, NarTop10Accuracy=0.7208, over 5060.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6652, over 5119.29 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (4/8) Epoch 40, batch 500, train_loss[loss=3.197, NarTop10Accuracy=0.6927, over 6090.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6651, over 5406.50 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (4/8) Epoch 40, batch 600, train_loss[loss=3.439, NarTop10Accuracy=0.6287, over 5750.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6609, over 5673.07 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (4/8) Epoch 40, batch 700, train_loss[loss=3.437, NarTop10Accuracy=0.6316, over 5052.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6572, over 5750.52 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (4/8) Epoch 40, batch 800, train_loss[loss=3.231, NarTop10Accuracy=0.6677, over 4234.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6562, over 5801.65 frames. ], batch size: 5, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (4/8) Epoch 40, batch 900, train_loss[loss=3.221, NarTop10Accuracy=0.6798, over 6181.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6557, over 5820.33 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (4/8) Epoch 40, batch 1000, train_loss[loss=3.541, NarTop10Accuracy=0.6129, over 6322.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6538, over 5924.07 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,710 INFO [trainer.py:765] (4/8) Epoch 40, batch 1100, train_loss[loss=3.417, NarTop10Accuracy=0.6277, over 6813.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.653, over 5955.50 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (4/8) Epoch 40, batch 1200, train_loss[loss=3.307, NarTop10Accuracy=0.6479, over 7572.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6533, over 5950.40 frames. ], batch size: 32, lr: 2.12e-03 +2024-08-06 15:49:29,782 INFO [trainer.py:765] (4/8) Epoch 40, batch 1300, train_loss[loss=3.34, NarTop10Accuracy=0.6506, over 5073.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6539, over 6031.76 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,246 INFO [trainer.py:803] (4/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (4/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (4/8) Maximum memory allocated so far is 29796MB +2024-08-06 15:49:49,616 INFO [optim.py:386] (4/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,461 INFO [trainer.py:765] (4/8) Epoch 40, batch 1400, train_loss[loss=3.155, NarTop10Accuracy=0.699, over 5940.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6527, over 6035.62 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,931 INFO [trainer.py:765] (4/8) Epoch 40, batch 1500, train_loss[loss=3.558, NarTop10Accuracy=0.6141, over 6219.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.652, over 5968.49 frames. ], batch size: 48, lr: 2.12e-03 +2024-08-06 15:51:13,821 INFO [trainer.py:765] (4/8) Epoch 40, batch 1600, train_loss[loss=3.173, NarTop10Accuracy=0.6826, over 7256.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6545, over 5952.85 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (4/8) Epoch 40, batch 1700, train_loss[loss=3.266, NarTop10Accuracy=0.6611, over 6613.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6535, over 5938.94 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (4/8) Epoch 40, batch 1800, train_loss[loss=3.415, NarTop10Accuracy=0.6372, over 7046.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6527, over 6008.90 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,821 INFO [trainer.py:765] (4/8) Epoch 40, batch 1900, train_loss[loss=3.436, NarTop10Accuracy=0.6316, over 6831.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6519, over 6048.12 frames. ], batch size: 50, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (4/8) Epoch 40, batch 2000, train_loss[loss=3.371, NarTop10Accuracy=0.6472, over 6382.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6508, over 6016.22 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (4/8) Epoch 40, batch 2100, train_loss[loss=3.23, NarTop10Accuracy=0.6722, over 4749.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6515, over 5998.66 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (4/8) Epoch 40, batch 2200, train_loss[loss=3.466, NarTop10Accuracy=0.6253, over 7258.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6503, over 6048.86 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (4/8) Epoch 40, batch 2300, train_loss[loss=3.154, NarTop10Accuracy=0.6816, over 5816.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6502, over 6068.53 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (4/8) Epoch 40, batch 2400, train_loss[loss=3.487, NarTop10Accuracy=0.6208, over 5198.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6494, over 5881.25 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (4/8) Epoch 40, batch 2500, train_loss[loss=2.958, NarTop10Accuracy=0.7302, over 4991.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6563, over 5549.51 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,705 INFO [trainer.py:650] (4/8) Reaches end of dataloader. +2024-08-06 15:55:28,708 INFO [trainer.py:1069] (4/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-5 b/libritts/log/log-train-2024-08-06-06-41-41-5 new file mode 100644 index 0000000000000000000000000000000000000000..5d9b37f729b19d78b8f6342c6a220fe6ba20c489 --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-5 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,446 INFO [trainer.py:870] (5/8) Training started +2024-08-06 06:41:41,447 INFO [trainer.py:889] (5/8) Device: cuda:5 +2024-08-06 06:41:41,447 INFO [trainer.py:890] (5/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,447 INFO [trainer.py:892] (5/8) About to create model +2024-08-06 06:41:42,310 INFO [trainer.py:899] (5/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,311 INFO [checkpoint.py:112] (5/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,546 INFO [trainer.py:914] (5/8) Using DDP +2024-08-06 06:41:46,900 INFO [datamodule.py:427] (5/8) About to get train cuts +2024-08-06 06:41:46,902 INFO [datamodule.py:434] (5/8) About to get dev cuts +2024-08-06 06:41:46,903 INFO [datamodule.py:292] (5/8) Disable SpecAugment +2024-08-06 06:41:46,903 INFO [datamodule.py:294] (5/8) About to create train dataset +2024-08-06 06:41:46,904 INFO [datamodule.py:323] (5/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,527 INFO [datamodule.py:344] (5/8) About to create train dataloader +2024-08-06 06:41:47,527 INFO [datamodule.py:367] (5/8) 
About to create dev dataset +2024-08-06 06:41:47,865 INFO [datamodule.py:388] (5/8) About to create dev dataloader +2024-08-06 06:42:36,135 INFO [trainer.py:765] (5/8) Epoch 1, batch 100, train_loss[loss=92.54, NarTop10Accuracy=0.01448, over 7174.00 frames. ], tot_loss[loss=80.56, NarTop10Accuracy=0.05175, over 2356.33 frames. ], batch size: 30, lr: 2.25e-02 +2024-08-06 06:43:05,818 INFO [trainer.py:765] (5/8) Epoch 1, batch 200, train_loss[loss=116.6, NarTop10Accuracy=0.02211, over 6946.00 frames. ], tot_loss[loss=99.14, NarTop10Accuracy=0.04427, over 3861.72 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,851 INFO [trainer.py:765] (5/8) Epoch 1, batch 300, train_loss[loss=73.66, NarTop10Accuracy=0.02235, over 7252.00 frames. ], tot_loss[loss=86.9, NarTop10Accuracy=0.04573, over 4672.31 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,251 INFO [trainer.py:765] (5/8) Epoch 1, batch 400, train_loss[loss=32.92, NarTop10Accuracy=0.06196, over 5060.00 frames. ], tot_loss[loss=68.02, NarTop10Accuracy=0.05005, over 5125.42 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,444 INFO [trainer.py:765] (5/8) Epoch 1, batch 500, train_loss[loss=18.06, NarTop10Accuracy=0.01905, over 6089.00 frames. ], tot_loss[loss=48.71, NarTop10Accuracy=0.05517, over 5406.12 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,924 INFO [trainer.py:765] (5/8) Epoch 1, batch 600, train_loss[loss=6.129, NarTop10Accuracy=0.1403, over 5740.00 frames. ], tot_loss[loss=33.37, NarTop10Accuracy=0.06211, over 5664.01 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,481 INFO [trainer.py:765] (5/8) Epoch 1, batch 700, train_loss[loss=7.097, NarTop10Accuracy=0.1017, over 4959.00 frames. ], tot_loss[loss=23.57, NarTop10Accuracy=0.07062, over 5724.62 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,662 INFO [trainer.py:765] (5/8) Epoch 1, batch 800, train_loss[loss=6.573, NarTop10Accuracy=0.1234, over 5086.00 frames. ], tot_loss[loss=17.53, NarTop10Accuracy=0.0818, over 5778.14 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,733 INFO [trainer.py:765] (5/8) Epoch 1, batch 900, train_loss[loss=6.084, NarTop10Accuracy=0.1594, over 6313.00 frames. ], tot_loss[loss=13.04, NarTop10Accuracy=0.1109, over 5800.15 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [trainer.py:765] (5/8) Epoch 1, batch 1000, train_loss[loss=5.864, NarTop10Accuracy=0.1968, over 6270.00 frames. ], tot_loss[loss=10.17, NarTop10Accuracy=0.1371, over 5909.40 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,141 INFO [trainer.py:765] (5/8) Epoch 1, batch 1100, train_loss[loss=5.416, NarTop10Accuracy=0.2126, over 6938.00 frames. ], tot_loss[loss=8.416, NarTop10Accuracy=0.1567, over 5940.83 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (5/8) Epoch 1, batch 1200, train_loss[loss=6.33, NarTop10Accuracy=0.1204, over 7281.00 frames. ], tot_loss[loss=7.304, NarTop10Accuracy=0.1738, over 5947.64 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 06:48:47,235 INFO [trainer.py:765] (5/8) Epoch 1, batch 1300, train_loss[loss=5.558, NarTop10Accuracy=0.1944, over 4992.00 frames. ], tot_loss[loss=6.615, NarTop10Accuracy=0.1845, over 5999.94 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,567 INFO [trainer.py:765] (5/8) Epoch 1, batch 1400, train_loss[loss=5.312, NarTop10Accuracy=0.2247, over 6136.00 frames. ], tot_loss[loss=6.202, NarTop10Accuracy=0.1898, over 6013.51 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,507 INFO [trainer.py:765] (5/8) Epoch 1, batch 1500, train_loss[loss=5.711, NarTop10Accuracy=0.1734, over 5985.00 frames. ], tot_loss[loss=5.94, NarTop10Accuracy=0.1956, over 5965.91 frames. ], batch size: 49, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (5/8) Epoch 1, batch 1600, train_loss[loss=5.44, NarTop10Accuracy=0.2283, over 7123.00 frames. ], tot_loss[loss=5.771, NarTop10Accuracy=0.2014, over 5953.16 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,597 INFO [trainer.py:765] (5/8) Epoch 1, batch 1700, train_loss[loss=5.147, NarTop10Accuracy=0.2642, over 6153.00 frames. ], tot_loss[loss=5.643, NarTop10Accuracy=0.2086, over 5941.26 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 06:51:11,955 INFO [trainer.py:765] (5/8) Epoch 1, batch 1800, train_loss[loss=5.512, NarTop10Accuracy=0.2077, over 7095.00 frames. ], tot_loss[loss=5.551, NarTop10Accuracy=0.2158, over 5998.42 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,223 INFO [trainer.py:765] (5/8) Epoch 1, batch 1900, train_loss[loss=5.693, NarTop10Accuracy=0.1785, over 6901.00 frames. ], tot_loss[loss=5.501, NarTop10Accuracy=0.2197, over 6029.69 frames. ], batch size: 49, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (5/8) Epoch 1, batch 2000, train_loss[loss=5.386, NarTop10Accuracy=0.2275, over 6008.00 frames. ], tot_loss[loss=5.436, NarTop10Accuracy=0.2287, over 5998.63 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (5/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26428MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,586 INFO [trainer.py:765] (5/8) Epoch 1, batch 2100, train_loss[loss=5.393, NarTop10Accuracy=0.2322, over 3897.00 frames. ], tot_loss[loss=5.389, NarTop10Accuracy=0.2363, over 5983.41 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 06:53:05,354 INFO [trainer.py:765] (5/8) Epoch 1, batch 2200, train_loss[loss=5.348, NarTop10Accuracy=0.231, over 7337.00 frames. ], tot_loss[loss=5.355, NarTop10Accuracy=0.2416, over 6026.53 frames. ], batch size: 30, lr: 2.87e-02 +2024-08-06 06:53:30,700 INFO [trainer.py:765] (5/8) Epoch 1, batch 2300, train_loss[loss=5.085, NarTop10Accuracy=0.2837, over 5740.00 frames. ], tot_loss[loss=5.333, NarTop10Accuracy=0.2458, over 6066.05 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,358 INFO [trainer.py:765] (5/8) Epoch 1, batch 2400, train_loss[loss=5.322, NarTop10Accuracy=0.262, over 5056.00 frames. ], tot_loss[loss=5.31, NarTop10Accuracy=0.2502, over 5886.97 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,658 INFO [trainer.py:765] (5/8) Epoch 1, batch 2500, train_loss[loss=4.995, NarTop10Accuracy=0.3059, over 5015.00 frames. ], tot_loss[loss=5.255, NarTop10Accuracy=0.2602, over 5538.39 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:40,052 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 06:55:37,937 INFO [trainer.py:765] (5/8) Epoch 2, batch 100, train_loss[loss=5.174, NarTop10Accuracy=0.2771, over 7433.00 frames. ], tot_loss[loss=5.162, NarTop10Accuracy=0.2819, over 2366.82 frames. 
], batch size: 30, lr: 2.77e-02 +2024-08-06 06:56:16,406 INFO [trainer.py:765] (5/8) Epoch 2, batch 200, train_loss[loss=4.862, NarTop10Accuracy=0.3557, over 6810.00 frames. ], tot_loss[loss=5.147, NarTop10Accuracy=0.2845, over 3872.74 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,972 INFO [trainer.py:765] (5/8) Epoch 2, batch 300, train_loss[loss=5.326, NarTop10Accuracy=0.2547, over 7095.00 frames. ], tot_loss[loss=5.139, NarTop10Accuracy=0.2868, over 4661.98 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,939 INFO [trainer.py:765] (5/8) Epoch 2, batch 400, train_loss[loss=5.438, NarTop10Accuracy=0.2238, over 5070.00 frames. ], tot_loss[loss=5.129, NarTop10Accuracy=0.2892, over 5123.97 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (5/8) Epoch 2, batch 500, train_loss[loss=4.811, NarTop10Accuracy=0.3436, over 6017.00 frames. ], tot_loss[loss=5.101, NarTop10Accuracy=0.2947, over 5402.33 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,426 INFO [trainer.py:765] (5/8) Epoch 2, batch 600, train_loss[loss=5.105, NarTop10Accuracy=0.2985, over 5734.00 frames. ], tot_loss[loss=5.097, NarTop10Accuracy=0.2959, over 5668.14 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (5/8) Epoch 2, batch 700, train_loss[loss=4.88, NarTop10Accuracy=0.3402, over 5015.00 frames. ], tot_loss[loss=5.091, NarTop10Accuracy=0.297, over 5741.19 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (5/8) Epoch 2, batch 800, train_loss[loss=4.618, NarTop10Accuracy=0.3844, over 5000.00 frames. ], tot_loss[loss=5.087, NarTop10Accuracy=0.2973, over 5794.36 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,184 INFO [trainer.py:765] (5/8) Epoch 2, batch 900, train_loss[loss=5.402, NarTop10Accuracy=0.2196, over 6734.00 frames. ], tot_loss[loss=5.055, NarTop10Accuracy=0.3033, over 5808.19 frames. ], batch size: 14, lr: 2.68e-02 +2024-08-06 07:00:33,142 INFO [trainer.py:765] (5/8) Epoch 2, batch 1000, train_loss[loss=4.869, NarTop10Accuracy=0.3434, over 6222.00 frames. ], tot_loss[loss=5.024, NarTop10Accuracy=0.309, over 5924.14 frames. ], batch size: 13, lr: 2.66e-02 +2024-08-06 07:01:05,573 INFO [trainer.py:765] (5/8) Epoch 2, batch 1100, train_loss[loss=4.955, NarTop10Accuracy=0.3248, over 6734.00 frames. ], tot_loss[loss=5.008, NarTop10Accuracy=0.312, over 5960.67 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,286 INFO [trainer.py:765] (5/8) Epoch 2, batch 1200, train_loss[loss=4.77, NarTop10Accuracy=0.3611, over 7184.00 frames. ], tot_loss[loss=4.997, NarTop10Accuracy=0.3142, over 5964.25 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,644 INFO [trainer.py:765] (5/8) Epoch 2, batch 1300, train_loss[loss=5.755, NarTop10Accuracy=0.1619, over 5025.00 frames. ], tot_loss[loss=4.955, NarTop10Accuracy=0.3219, over 6013.62 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,253 INFO [trainer.py:765] (5/8) Epoch 2, batch 1400, train_loss[loss=4.924, NarTop10Accuracy=0.3262, over 6520.00 frames. ], tot_loss[loss=4.945, NarTop10Accuracy=0.3242, over 6008.84 frames. ], batch size: 12, lr: 2.61e-02 +2024-08-06 07:02:50,268 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (5/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:03:02,637 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (5/8) Epoch 2, batch 1500, train_loss[loss=5.203, NarTop10Accuracy=0.2848, over 5744.00 frames. ], tot_loss[loss=4.931, NarTop10Accuracy=0.3266, over 5942.84 frames. ], batch size: 48, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (5/8) Epoch 2, batch 1600, train_loss[loss=4.767, NarTop10Accuracy=0.3501, over 6970.00 frames. ], tot_loss[loss=4.914, NarTop10Accuracy=0.3305, over 5924.07 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (5/8) Epoch 2, batch 1700, train_loss[loss=4.859, NarTop10Accuracy=0.3388, over 6300.00 frames. ], tot_loss[loss=4.91, NarTop10Accuracy=0.3317, over 5920.86 frames. ], batch size: 13, lr: 2.58e-02 +2024-08-06 07:04:46,887 INFO [trainer.py:765] (5/8) Epoch 2, batch 1800, train_loss[loss=4.629, NarTop10Accuracy=0.3845, over 7068.00 frames. ], tot_loss[loss=4.89, NarTop10Accuracy=0.3356, over 5987.98 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,586 INFO [trainer.py:765] (5/8) Epoch 2, batch 1900, train_loss[loss=4.978, NarTop10Accuracy=0.3213, over 5649.00 frames. ], tot_loss[loss=4.868, NarTop10Accuracy=0.3397, over 6040.55 frames. ], batch size: 49, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (5/8) Epoch 2, batch 2000, train_loss[loss=4.817, NarTop10Accuracy=0.3497, over 5860.00 frames. ], tot_loss[loss=4.848, NarTop10Accuracy=0.3439, over 6016.95 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,828 INFO [trainer.py:765] (5/8) Epoch 2, batch 2100, train_loss[loss=4.681, NarTop10Accuracy=0.3813, over 4854.00 frames. ], tot_loss[loss=4.86, NarTop10Accuracy=0.3421, over 6004.27 frames. ], batch size: 5, lr: 2.52e-02 +2024-08-06 07:06:30,372 INFO [trainer.py:765] (5/8) Epoch 2, batch 2200, train_loss[loss=4.7, NarTop10Accuracy=0.3668, over 7400.00 frames. ], tot_loss[loss=4.812, NarTop10Accuracy=0.3513, over 6045.19 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (5/8) Epoch 2, batch 2300, train_loss[loss=4.396, NarTop10Accuracy=0.426, over 5732.00 frames. ], tot_loss[loss=4.805, NarTop10Accuracy=0.3529, over 6056.45 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (5/8) Epoch 2, batch 2400, train_loss[loss=4.673, NarTop10Accuracy=0.3757, over 5183.00 frames. ], tot_loss[loss=4.779, NarTop10Accuracy=0.3576, over 5872.95 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (5/8) Epoch 2, batch 2500, train_loss[loss=4.435, NarTop10Accuracy=0.4287, over 4253.00 frames. ], tot_loss[loss=4.751, NarTop10Accuracy=0.3638, over 5560.89 frames. ], batch size: 5, lr: 2.47e-02 +2024-08-06 07:08:08,054 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 07:09:08,538 INFO [trainer.py:765] (5/8) Epoch 3, batch 100, train_loss[loss=4.992, NarTop10Accuracy=0.3171, over 7109.00 frames. ], tot_loss[loss=4.662, NarTop10Accuracy=0.3837, over 2363.00 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,499 INFO [trainer.py:765] (5/8) Epoch 3, batch 200, train_loss[loss=4.451, NarTop10Accuracy=0.4302, over 6831.00 frames. ], tot_loss[loss=4.616, NarTop10Accuracy=0.3915, over 3853.48 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (5/8) Epoch 3, batch 300, train_loss[loss=4.573, NarTop10Accuracy=0.404, over 7046.00 frames. ], tot_loss[loss=4.608, NarTop10Accuracy=0.3925, over 4653.45 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (5/8) Epoch 3, batch 400, train_loss[loss=4.343, NarTop10Accuracy=0.439, over 5254.00 frames. ], tot_loss[loss=4.579, NarTop10Accuracy=0.3981, over 5114.17 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (5/8) Epoch 3, batch 500, train_loss[loss=4.579, NarTop10Accuracy=0.3988, over 6155.00 frames. ], tot_loss[loss=4.574, NarTop10Accuracy=0.399, over 5390.01 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (5/8) Epoch 3, batch 600, train_loss[loss=4.398, NarTop10Accuracy=0.4217, over 5559.00 frames. ], tot_loss[loss=4.556, NarTop10Accuracy=0.4026, over 5638.56 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (5/8) Epoch 3, batch 700, train_loss[loss=4.532, NarTop10Accuracy=0.4004, over 5006.00 frames. ], tot_loss[loss=4.546, NarTop10Accuracy=0.4045, over 5731.70 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (5/8) Epoch 3, batch 800, train_loss[loss=4.223, NarTop10Accuracy=0.4529, over 5064.00 frames. ], tot_loss[loss=4.536, NarTop10Accuracy=0.4061, over 5777.96 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (5/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,434 INFO [trainer.py:765] (5/8) Epoch 3, batch 900, train_loss[loss=4.263, NarTop10Accuracy=0.452, over 6142.00 frames. ], tot_loss[loss=4.517, NarTop10Accuracy=0.4101, over 5806.45 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 07:14:25,626 INFO [trainer.py:765] (5/8) Epoch 3, batch 1000, train_loss[loss=4.19, NarTop10Accuracy=0.4601, over 6200.00 frames. ], tot_loss[loss=4.501, NarTop10Accuracy=0.413, over 5915.62 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 07:14:56,324 INFO [trainer.py:765] (5/8) Epoch 3, batch 1100, train_loss[loss=4.361, NarTop10Accuracy=0.4354, over 6957.00 frames. ], tot_loss[loss=4.484, NarTop10Accuracy=0.4159, over 5949.66 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,866 INFO [trainer.py:765] (5/8) Epoch 3, batch 1200, train_loss[loss=4.261, NarTop10Accuracy=0.4672, over 7280.00 frames. ], tot_loss[loss=4.479, NarTop10Accuracy=0.4168, over 5941.03 frames. ], batch size: 30, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (5/8) Epoch 3, batch 1300, train_loss[loss=4.514, NarTop10Accuracy=0.4079, over 5103.00 frames. ], tot_loss[loss=4.459, NarTop10Accuracy=0.421, over 6004.83 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (5/8) Epoch 3, batch 1400, train_loss[loss=4.373, NarTop10Accuracy=0.4259, over 6140.00 frames. ], tot_loss[loss=4.463, NarTop10Accuracy=0.4202, over 6021.83 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,663 INFO [trainer.py:765] (5/8) Epoch 3, batch 1500, train_loss[loss=4.651, NarTop10Accuracy=0.3885, over 5939.00 frames. ], tot_loss[loss=4.446, NarTop10Accuracy=0.4239, over 5957.07 frames. ], batch size: 48, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (5/8) Epoch 3, batch 1600, train_loss[loss=4.129, NarTop10Accuracy=0.4779, over 7047.00 frames. ], tot_loss[loss=4.432, NarTop10Accuracy=0.4265, over 5937.00 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,503 INFO [trainer.py:765] (5/8) Epoch 3, batch 1700, train_loss[loss=4.023, NarTop10Accuracy=0.5046, over 6536.00 frames. ], tot_loss[loss=4.406, NarTop10Accuracy=0.4316, over 5941.19 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 07:18:32,160 INFO [trainer.py:765] (5/8) Epoch 3, batch 1800, train_loss[loss=4.242, NarTop10Accuracy=0.4638, over 6983.00 frames. ], tot_loss[loss=4.383, NarTop10Accuracy=0.4356, over 5998.08 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,958 INFO [trainer.py:765] (5/8) Epoch 3, batch 1900, train_loss[loss=4.648, NarTop10Accuracy=0.3871, over 6604.00 frames. ], tot_loss[loss=4.37, NarTop10Accuracy=0.4387, over 6046.89 frames. ], batch size: 48, lr: 2.16e-02 +2024-08-06 07:19:27,621 INFO [trainer.py:765] (5/8) Epoch 3, batch 2000, train_loss[loss=4.521, NarTop10Accuracy=0.4044, over 6201.00 frames. ], tot_loss[loss=4.351, NarTop10Accuracy=0.4421, over 6018.32 frames. ], batch size: 51, lr: 2.15e-02 +2024-08-06 07:19:53,070 INFO [trainer.py:765] (5/8) Epoch 3, batch 2100, train_loss[loss=4.063, NarTop10Accuracy=0.5078, over 3919.00 frames. ], tot_loss[loss=4.323, NarTop10Accuracy=0.4476, over 5996.20 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,553 INFO [trainer.py:765] (5/8) Epoch 3, batch 2200, train_loss[loss=4.501, NarTop10Accuracy=0.4123, over 7291.00 frames. ], tot_loss[loss=4.31, NarTop10Accuracy=0.4501, over 6042.57 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (5/8) Epoch 3, batch 2300, train_loss[loss=4.098, NarTop10Accuracy=0.4838, over 5826.00 frames. ], tot_loss[loss=4.314, NarTop10Accuracy=0.4491, over 6079.12 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,677 INFO [trainer.py:765] (5/8) Epoch 3, batch 2400, train_loss[loss=4.368, NarTop10Accuracy=0.444, over 5034.00 frames. ], tot_loss[loss=4.309, NarTop10Accuracy=0.45, over 5889.85 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,171 INFO [trainer.py:765] (5/8) Epoch 3, batch 2500, train_loss[loss=4.206, NarTop10Accuracy=0.4719, over 4997.00 frames. ], tot_loss[loss=4.267, NarTop10Accuracy=0.4584, over 5537.87 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,166 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (5/8) Epoch 4, batch 100, train_loss[loss=4.133, NarTop10Accuracy=0.471, over 7314.00 frames. ], tot_loss[loss=4.199, NarTop10Accuracy=0.4728, over 2368.53 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,303 INFO [trainer.py:765] (5/8) Epoch 4, batch 200, train_loss[loss=4.248, NarTop10Accuracy=0.4618, over 6757.00 frames. ], tot_loss[loss=4.175, NarTop10Accuracy=0.4782, over 3874.58 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,361 INFO [trainer.py:765] (5/8) Epoch 4, batch 300, train_loss[loss=3.959, NarTop10Accuracy=0.5199, over 7208.00 frames. ], tot_loss[loss=4.173, NarTop10Accuracy=0.4789, over 4679.84 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (5/8) Epoch 4, batch 400, train_loss[loss=3.816, NarTop10Accuracy=0.5571, over 5079.00 frames. ], tot_loss[loss=4.175, NarTop10Accuracy=0.4784, over 5119.92 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (5/8) Epoch 4, batch 500, train_loss[loss=3.836, NarTop10Accuracy=0.5402, over 6095.00 frames. ], tot_loss[loss=4.161, NarTop10Accuracy=0.4807, over 5401.97 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (5/8) Epoch 4, batch 600, train_loss[loss=4.393, NarTop10Accuracy=0.4305, over 5774.00 frames. ], tot_loss[loss=4.146, NarTop10Accuracy=0.4831, over 5682.01 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,606 INFO [trainer.py:765] (5/8) Epoch 4, batch 700, train_loss[loss=3.966, NarTop10Accuracy=0.5152, over 4939.00 frames. ], tot_loss[loss=4.153, NarTop10Accuracy=0.4819, over 5743.32 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,432 INFO [trainer.py:765] (5/8) Epoch 4, batch 800, train_loss[loss=4.175, NarTop10Accuracy=0.4839, over 4944.00 frames. ], tot_loss[loss=4.144, NarTop10Accuracy=0.4837, over 5815.90 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,041 INFO [trainer.py:765] (5/8) Epoch 4, batch 900, train_loss[loss=4.215, NarTop10Accuracy=0.4628, over 6257.00 frames. ], tot_loss[loss=4.118, NarTop10Accuracy=0.4885, over 5830.32 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (5/8) Epoch 4, batch 1000, train_loss[loss=3.89, NarTop10Accuracy=0.53, over 6209.00 frames. ], tot_loss[loss=4.112, NarTop10Accuracy=0.4899, over 5936.87 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,071 INFO [trainer.py:765] (5/8) Epoch 4, batch 1100, train_loss[loss=4.069, NarTop10Accuracy=0.504, over 6838.00 frames. ], tot_loss[loss=4.103, NarTop10Accuracy=0.4916, over 5944.47 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,598 INFO [trainer.py:765] (5/8) Epoch 4, batch 1200, train_loss[loss=4.281, NarTop10Accuracy=0.4549, over 6999.00 frames. ], tot_loss[loss=4.107, NarTop10Accuracy=0.4909, over 5952.85 frames. ], batch size: 30, lr: 1.87e-02 +2024-08-06 07:30:04,990 INFO [trainer.py:765] (5/8) Epoch 4, batch 1300, train_loss[loss=4.141, NarTop10Accuracy=0.4909, over 5102.00 frames. ], tot_loss[loss=4.079, NarTop10Accuracy=0.4965, over 6016.28 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,379 INFO [trainer.py:765] (5/8) Epoch 4, batch 1400, train_loss[loss=4.009, NarTop10Accuracy=0.5101, over 6183.00 frames. ], tot_loss[loss=4.075, NarTop10Accuracy=0.4973, over 6014.39 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,831 INFO [trainer.py:765] (5/8) Epoch 4, batch 1500, train_loss[loss=3.996, NarTop10Accuracy=0.5177, over 5881.00 frames. ], tot_loss[loss=4.081, NarTop10Accuracy=0.4961, over 5955.41 frames. 
], batch size: 49, lr: 1.85e-02 +2024-08-06 07:31:39,960 INFO [trainer.py:765] (5/8) Epoch 4, batch 1600, train_loss[loss=4.094, NarTop10Accuracy=0.4948, over 7509.00 frames. ], tot_loss[loss=4.083, NarTop10Accuracy=0.4959, over 5934.98 frames. ], batch size: 23, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (5/8) Epoch 4, batch 1700, train_loss[loss=4.299, NarTop10Accuracy=0.4485, over 6265.00 frames. ], tot_loss[loss=4.049, NarTop10Accuracy=0.5024, over 5927.41 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,482 INFO [trainer.py:765] (5/8) Epoch 4, batch 1800, train_loss[loss=4.151, NarTop10Accuracy=0.4663, over 6953.00 frames. ], tot_loss[loss=4.042, NarTop10Accuracy=0.5041, over 5992.40 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,193 INFO [trainer.py:765] (5/8) Epoch 4, batch 1900, train_loss[loss=4.203, NarTop10Accuracy=0.4662, over 6090.00 frames. ], tot_loss[loss=4.063, NarTop10Accuracy=0.4999, over 6031.30 frames. ], batch size: 51, lr: 1.82e-02 +2024-08-06 07:33:25,989 INFO [trainer.py:765] (5/8) Epoch 4, batch 2000, train_loss[loss=4.569, NarTop10Accuracy=0.4086, over 6579.00 frames. ], tot_loss[loss=4.044, NarTop10Accuracy=0.504, over 6021.77 frames. ], batch size: 49, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (5/8) Epoch 4, batch 2100, train_loss[loss=3.869, NarTop10Accuracy=0.5301, over 4898.00 frames. ], tot_loss[loss=4.037, NarTop10Accuracy=0.5055, over 6020.93 frames. ], batch size: 5, lr: 1.81e-02 +2024-08-06 07:34:16,905 INFO [trainer.py:765] (5/8) Epoch 4, batch 2200, train_loss[loss=4.189, NarTop10Accuracy=0.475, over 7230.00 frames. ], tot_loss[loss=4.04, NarTop10Accuracy=0.5049, over 6051.80 frames. ], batch size: 30, lr: 1.80e-02 +2024-08-06 07:34:31,430 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (5/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (5/8) Epoch 4, batch 2300, train_loss[loss=3.798, NarTop10Accuracy=0.5484, over 5690.00 frames. ], tot_loss[loss=4.034, NarTop10Accuracy=0.5063, over 6078.07 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,167 INFO [trainer.py:765] (5/8) Epoch 4, batch 2400, train_loss[loss=4.029, NarTop10Accuracy=0.4988, over 5097.00 frames. ], tot_loss[loss=4.027, NarTop10Accuracy=0.508, over 5892.67 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,623 INFO [trainer.py:765] (5/8) Epoch 4, batch 2500, train_loss[loss=4.386, NarTop10Accuracy=0.4351, over 5071.00 frames. ], tot_loss[loss=4.009, NarTop10Accuracy=0.5113, over 5546.38 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,491 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (5/8) Epoch 5, batch 100, train_loss[loss=3.895, NarTop10Accuracy=0.5337, over 7124.00 frames. ], tot_loss[loss=3.955, NarTop10Accuracy=0.5239, over 2378.93 frames. ], batch size: 30, lr: 1.66e-02 +2024-08-06 07:37:39,814 INFO [trainer.py:765] (5/8) Epoch 5, batch 200, train_loss[loss=4.172, NarTop10Accuracy=0.4889, over 6907.00 frames. ], tot_loss[loss=3.938, NarTop10Accuracy=0.5268, over 3883.77 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (5/8) Epoch 5, batch 300, train_loss[loss=4.325, NarTop10Accuracy=0.4454, over 7178.00 frames. ], tot_loss[loss=3.924, NarTop10Accuracy=0.5295, over 4681.68 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (5/8) Epoch 5, batch 400, train_loss[loss=3.831, NarTop10Accuracy=0.5537, over 5129.00 frames. ], tot_loss[loss=3.916, NarTop10Accuracy=0.5308, over 5129.08 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (5/8) Epoch 5, batch 500, train_loss[loss=3.873, NarTop10Accuracy=0.5423, over 6183.00 frames. ], tot_loss[loss=3.913, NarTop10Accuracy=0.5317, over 5413.05 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (5/8) Epoch 5, batch 600, train_loss[loss=3.926, NarTop10Accuracy=0.515, over 5699.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5329, over 5677.03 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (5/8) Epoch 5, batch 700, train_loss[loss=3.672, NarTop10Accuracy=0.5809, over 5048.00 frames. ], tot_loss[loss=3.913, NarTop10Accuracy=0.5319, over 5729.97 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,367 INFO [trainer.py:765] (5/8) Epoch 5, batch 800, train_loss[loss=4.079, NarTop10Accuracy=0.4995, over 5002.00 frames. ], tot_loss[loss=3.919, NarTop10Accuracy=0.5304, over 5809.52 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (5/8) Epoch 5, batch 900, train_loss[loss=4.219, NarTop10Accuracy=0.474, over 6061.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5344, over 5828.77 frames. ], batch size: 13, lr: 1.61e-02 +2024-08-06 07:42:13,845 INFO [trainer.py:765] (5/8) Epoch 5, batch 1000, train_loss[loss=3.896, NarTop10Accuracy=0.527, over 6269.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5364, over 5930.51 frames. ], batch size: 13, lr: 1.60e-02 +2024-08-06 07:42:46,467 INFO [trainer.py:765] (5/8) Epoch 5, batch 1100, train_loss[loss=3.995, NarTop10Accuracy=0.5288, over 6838.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5355, over 5956.44 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (5/8) Epoch 5, batch 1200, train_loss[loss=4.073, NarTop10Accuracy=0.5037, over 7218.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5352, over 5964.51 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,556 INFO [trainer.py:765] (5/8) Epoch 5, batch 1300, train_loss[loss=3.666, NarTop10Accuracy=0.5785, over 5087.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5347, over 6037.29 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (5/8) Epoch 5, batch 1400, train_loss[loss=4.049, NarTop10Accuracy=0.508, over 6213.00 frames. ], tot_loss[loss=3.898, NarTop10Accuracy=0.5344, over 6050.56 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (5/8) Epoch 5, batch 1500, train_loss[loss=3.939, NarTop10Accuracy=0.5322, over 6554.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5343, over 6006.38 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (5/8) Epoch 5, batch 1600, train_loss[loss=4.054, NarTop10Accuracy=0.5044, over 7320.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.5336, over 5960.86 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,057 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (5/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:46:02,122 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (5/8) Epoch 5, batch 1700, train_loss[loss=3.837, NarTop10Accuracy=0.5419, over 6253.00 frames. ], tot_loss[loss=3.896, NarTop10Accuracy=0.535, over 5938.48 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (5/8) Epoch 5, batch 1800, train_loss[loss=3.798, NarTop10Accuracy=0.559, over 7129.00 frames. ], tot_loss[loss=3.882, NarTop10Accuracy=0.5383, over 5995.37 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (5/8) Epoch 5, batch 1900, train_loss[loss=3.908, NarTop10Accuracy=0.538, over 6509.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.536, over 6036.16 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (5/8) Epoch 5, batch 2000, train_loss[loss=3.878, NarTop10Accuracy=0.544, over 6217.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5357, over 6017.93 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:52,619 INFO [trainer.py:765] (5/8) Epoch 5, batch 2100, train_loss[loss=3.929, NarTop10Accuracy=0.5213, over 4756.00 frames. ], tot_loss[loss=3.893, NarTop10Accuracy=0.5354, over 6004.21 frames. ], batch size: 5, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (5/8) Epoch 5, batch 2200, train_loss[loss=3.978, NarTop10Accuracy=0.5171, over 7414.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5371, over 6037.50 frames. ], batch size: 31, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (5/8) Epoch 5, batch 2300, train_loss[loss=4.203, NarTop10Accuracy=0.4701, over 5852.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5355, over 6063.70 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (5/8) Epoch 5, batch 2400, train_loss[loss=3.886, NarTop10Accuracy=0.5381, over 5145.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5355, over 5857.37 frames. ], batch size: 7, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (5/8) Epoch 5, batch 2500, train_loss[loss=3.752, NarTop10Accuracy=0.5639, over 5041.00 frames. ], tot_loss[loss=3.849, NarTop10Accuracy=0.5444, over 5523.87 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,606 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (5/8) Epoch 6, batch 100, train_loss[loss=3.559, NarTop10Accuracy=0.6046, over 7377.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5593, over 2359.29 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,788 INFO [trainer.py:765] (5/8) Epoch 6, batch 200, train_loss[loss=3.677, NarTop10Accuracy=0.5837, over 6923.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5566, over 3853.21 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (5/8) Epoch 6, batch 300, train_loss[loss=3.752, NarTop10Accuracy=0.5668, over 7139.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5575, over 4676.59 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (5/8) Epoch 6, batch 400, train_loss[loss=3.862, NarTop10Accuracy=0.5433, over 5169.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5591, over 5119.31 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (5/8) Epoch 6, batch 500, train_loss[loss=3.977, NarTop10Accuracy=0.5184, over 6200.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5598, over 5401.55 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (5/8) Epoch 6, batch 600, train_loss[loss=3.731, NarTop10Accuracy=0.5688, over 5785.00 frames. ], tot_loss[loss=3.778, NarTop10Accuracy=0.5599, over 5671.11 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,438 INFO [trainer.py:765] (5/8) Epoch 6, batch 700, train_loss[loss=3.709, NarTop10Accuracy=0.5612, over 5158.00 frames. ], tot_loss[loss=3.781, NarTop10Accuracy=0.5596, over 5754.59 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (5/8) Epoch 6, batch 800, train_loss[loss=3.714, NarTop10Accuracy=0.5658, over 4954.00 frames. ], tot_loss[loss=3.784, NarTop10Accuracy=0.5589, over 5802.64 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (5/8) Epoch 6, batch 900, train_loss[loss=3.768, NarTop10Accuracy=0.567, over 6354.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5579, over 5818.55 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (5/8) Epoch 6, batch 1000, train_loss[loss=3.579, NarTop10Accuracy=0.5999, over 6738.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5556, over 5920.55 frames. ], batch size: 14, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (5/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 07:56:45,276 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,638 INFO [trainer.py:765] (5/8) Epoch 6, batch 1100, train_loss[loss=3.562, NarTop10Accuracy=0.5981, over 6863.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.556, over 5969.25 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,887 INFO [trainer.py:765] (5/8) Epoch 6, batch 1200, train_loss[loss=4.047, NarTop10Accuracy=0.5041, over 7485.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5579, over 5969.57 frames. ], batch size: 30, lr: 1.37e-02 +2024-08-06 07:57:56,611 INFO [trainer.py:765] (5/8) Epoch 6, batch 1300, train_loss[loss=3.694, NarTop10Accuracy=0.563, over 5032.00 frames. ], tot_loss[loss=3.783, NarTop10Accuracy=0.558, over 6025.38 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,735 INFO [trainer.py:765] (5/8) Epoch 6, batch 1400, train_loss[loss=3.832, NarTop10Accuracy=0.5407, over 6165.00 frames. ], tot_loss[loss=3.79, NarTop10Accuracy=0.5566, over 6041.35 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,998 INFO [trainer.py:765] (5/8) Epoch 6, batch 1500, train_loss[loss=4.135, NarTop10Accuracy=0.4846, over 5910.00 frames. ], tot_loss[loss=3.804, NarTop10Accuracy=0.5542, over 5975.66 frames. 
], batch size: 48, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (5/8) Epoch 6, batch 1600, train_loss[loss=3.652, NarTop10Accuracy=0.582, over 7262.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.5566, over 5970.85 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (5/8) Epoch 6, batch 1700, train_loss[loss=3.652, NarTop10Accuracy=0.5725, over 6601.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5582, over 5953.93 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (5/8) Epoch 6, batch 1800, train_loss[loss=3.805, NarTop10Accuracy=0.5509, over 7074.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5579, over 6020.67 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,794 INFO [trainer.py:765] (5/8) Epoch 6, batch 1900, train_loss[loss=3.982, NarTop10Accuracy=0.5265, over 6081.00 frames. ], tot_loss[loss=3.817, NarTop10Accuracy=0.5518, over 6054.92 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 08:01:14,460 INFO [trainer.py:765] (5/8) Epoch 6, batch 2000, train_loss[loss=3.967, NarTop10Accuracy=0.5208, over 5707.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5551, over 6027.71 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:43,133 INFO [trainer.py:765] (5/8) Epoch 6, batch 2100, train_loss[loss=3.711, NarTop10Accuracy=0.5853, over 4829.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5551, over 6020.72 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,517 INFO [trainer.py:765] (5/8) Epoch 6, batch 2200, train_loss[loss=3.633, NarTop10Accuracy=0.5928, over 7432.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5545, over 6054.10 frames. ], batch size: 31, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (5/8) Epoch 6, batch 2300, train_loss[loss=3.783, NarTop10Accuracy=0.5603, over 5659.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5548, over 6074.54 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (5/8) Epoch 6, batch 2400, train_loss[loss=3.737, NarTop10Accuracy=0.5631, over 5274.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.557, over 5903.68 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,938 INFO [trainer.py:765] (5/8) Epoch 6, batch 2500, train_loss[loss=4.332, NarTop10Accuracy=0.446, over 5025.00 frames. ], tot_loss[loss=3.771, NarTop10Accuracy=0.5606, over 5554.04 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:43,118 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:04:42,818 INFO [trainer.py:765] (5/8) Epoch 7, batch 100, train_loss[loss=3.884, NarTop10Accuracy=0.5486, over 7354.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5753, over 2369.54 frames. ], batch size: 30, lr: 1.23e-02 +2024-08-06 08:05:18,346 INFO [trainer.py:765] (5/8) Epoch 7, batch 200, train_loss[loss=3.913, NarTop10Accuracy=0.5396, over 6827.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5742, over 3870.96 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (5/8) Epoch 7, batch 300, train_loss[loss=3.721, NarTop10Accuracy=0.5719, over 7250.00 frames. ], tot_loss[loss=3.726, NarTop10Accuracy=0.5715, over 4684.10 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (5/8) Epoch 7, batch 400, train_loss[loss=3.836, NarTop10Accuracy=0.5467, over 5281.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5724, over 5140.76 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,315 INFO [trainer.py:765] (5/8) Epoch 7, batch 500, train_loss[loss=3.534, NarTop10Accuracy=0.5902, over 6019.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5749, over 5402.49 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (5/8) Epoch 7, batch 600, train_loss[loss=3.675, NarTop10Accuracy=0.5834, over 5755.00 frames. ], tot_loss[loss=3.71, NarTop10Accuracy=0.5739, over 5665.91 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (5/8) Epoch 7, batch 700, train_loss[loss=3.548, NarTop10Accuracy=0.6077, over 5273.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5724, over 5739.33 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (5/8) Epoch 7, batch 800, train_loss[loss=3.513, NarTop10Accuracy=0.6123, over 5085.00 frames. ], tot_loss[loss=3.701, NarTop10Accuracy=0.5764, over 5807.39 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (5/8) Epoch 7, batch 900, train_loss[loss=3.702, NarTop10Accuracy=0.5685, over 6711.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.5741, over 5829.72 frames. ], batch size: 14, lr: 1.21e-02 +2024-08-06 08:09:54,191 INFO [trainer.py:765] (5/8) Epoch 7, batch 1000, train_loss[loss=3.846, NarTop10Accuracy=0.5495, over 6297.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5729, over 5930.40 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (5/8) Epoch 7, batch 1100, train_loss[loss=3.777, NarTop10Accuracy=0.5587, over 6819.00 frames. ], tot_loss[loss=3.725, NarTop10Accuracy=0.5714, over 5953.46 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (5/8) Epoch 7, batch 1200, train_loss[loss=4.098, NarTop10Accuracy=0.5048, over 7225.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5715, over 5952.42 frames. ], batch size: 31, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (5/8) Epoch 7, batch 1300, train_loss[loss=3.588, NarTop10Accuracy=0.5997, over 5045.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.5722, over 6019.87 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,913 INFO [trainer.py:765] (5/8) Epoch 7, batch 1400, train_loss[loss=3.859, NarTop10Accuracy=0.547, over 6499.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5705, over 6039.95 frames. ], batch size: 12, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (5/8) Epoch 7, batch 1500, train_loss[loss=3.632, NarTop10Accuracy=0.5936, over 5797.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.573, over 5963.80 frames. ], batch size: 50, lr: 1.19e-02 +2024-08-06 08:13:13,237 INFO [trainer.py:765] (5/8) Epoch 7, batch 1600, train_loss[loss=3.682, NarTop10Accuracy=0.5719, over 7238.00 frames. ], tot_loss[loss=3.718, NarTop10Accuracy=0.572, over 5950.49 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (5/8) Epoch 7, batch 1700, train_loss[loss=3.765, NarTop10Accuracy=0.5636, over 6221.00 frames. ], tot_loss[loss=3.733, NarTop10Accuracy=0.5689, over 5932.16 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 08:14:06,583 INFO [trainer.py:765] (5/8) Epoch 7, batch 1800, train_loss[loss=3.713, NarTop10Accuracy=0.5768, over 7281.00 frames. ], tot_loss[loss=3.74, NarTop10Accuracy=0.5674, over 5995.88 frames. ], batch size: 23, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (5/8) Epoch 7, batch 1900, train_loss[loss=3.973, NarTop10Accuracy=0.5228, over 5823.00 frames. ], tot_loss[loss=3.745, NarTop10Accuracy=0.5664, over 6033.42 frames. ], batch size: 48, lr: 1.17e-02 +2024-08-06 08:14:58,994 INFO [trainer.py:765] (5/8) Epoch 7, batch 2000, train_loss[loss=3.579, NarTop10Accuracy=0.5991, over 6015.00 frames. ], tot_loss[loss=3.728, NarTop10Accuracy=0.57, over 6023.82 frames. ], batch size: 48, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (5/8) Epoch 7, batch 2100, train_loss[loss=3.834, NarTop10Accuracy=0.5363, over 3934.00 frames. ], tot_loss[loss=3.727, NarTop10Accuracy=0.5702, over 5998.65 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (5/8) Epoch 7, batch 2200, train_loss[loss=4.085, NarTop10Accuracy=0.5007, over 7200.00 frames. ], tot_loss[loss=3.741, NarTop10Accuracy=0.5674, over 6036.86 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (5/8) Epoch 7, batch 2300, train_loss[loss=3.772, NarTop10Accuracy=0.5598, over 5757.00 frames. ], tot_loss[loss=3.749, NarTop10Accuracy=0.5658, over 6058.65 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (5/8) Epoch 7, batch 2400, train_loss[loss=3.882, NarTop10Accuracy=0.5467, over 5085.00 frames. ], tot_loss[loss=3.747, NarTop10Accuracy=0.5665, over 5866.71 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (5/8) Epoch 7, batch 2500, train_loss[loss=3.376, NarTop10Accuracy=0.6311, over 5126.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5719, over 5518.56 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,843 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (5/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 26975MB +2024-08-06 08:17:17,901 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,260 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:18:36,193 INFO [trainer.py:765] (5/8) Epoch 8, batch 100, train_loss[loss=3.71, NarTop10Accuracy=0.5775, over 6941.00 frames. ], tot_loss[loss=3.647, NarTop10Accuracy=0.588, over 2372.57 frames. ], batch size: 30, lr: 1.09e-02 +2024-08-06 08:19:15,020 INFO [trainer.py:765] (5/8) Epoch 8, batch 200, train_loss[loss=3.488, NarTop10Accuracy=0.6241, over 7078.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5849, over 3874.81 frames. ], batch size: 18, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (5/8) Epoch 8, batch 300, train_loss[loss=3.515, NarTop10Accuracy=0.6106, over 7158.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5842, over 4701.22 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,268 INFO [trainer.py:765] (5/8) Epoch 8, batch 400, train_loss[loss=3.383, NarTop10Accuracy=0.6432, over 5112.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5833, over 5154.40 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,421 INFO [trainer.py:765] (5/8) Epoch 8, batch 500, train_loss[loss=3.719, NarTop10Accuracy=0.572, over 6157.00 frames. ], tot_loss[loss=3.658, NarTop10Accuracy=0.5849, over 5425.98 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (5/8) Epoch 8, batch 600, train_loss[loss=3.856, NarTop10Accuracy=0.5434, over 5720.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5822, over 5687.00 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (5/8) Epoch 8, batch 700, train_loss[loss=3.829, NarTop10Accuracy=0.5488, over 4321.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.583, over 5737.40 frames. ], batch size: 5, lr: 1.07e-02 +2024-08-06 08:22:27,341 INFO [trainer.py:765] (5/8) Epoch 8, batch 800, train_loss[loss=3.586, NarTop10Accuracy=0.5887, over 5092.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.582, over 5800.69 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (5/8) Epoch 8, batch 900, train_loss[loss=3.337, NarTop10Accuracy=0.6569, over 6150.00 frames. ], tot_loss[loss=3.654, NarTop10Accuracy=0.5852, over 5827.75 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:42,943 INFO [trainer.py:765] (5/8) Epoch 8, batch 1000, train_loss[loss=3.391, NarTop10Accuracy=0.6466, over 6637.00 frames. ], tot_loss[loss=3.657, NarTop10Accuracy=0.5845, over 5937.19 frames. ], batch size: 14, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (5/8) Epoch 8, batch 1100, train_loss[loss=3.714, NarTop10Accuracy=0.5742, over 6836.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5836, over 5967.83 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,339 INFO [trainer.py:765] (5/8) Epoch 8, batch 1200, train_loss[loss=3.636, NarTop10Accuracy=0.5995, over 7332.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5838, over 5953.98 frames. ], batch size: 31, lr: 1.06e-02 +2024-08-06 08:25:26,604 INFO [trainer.py:765] (5/8) Epoch 8, batch 1300, train_loss[loss=3.694, NarTop10Accuracy=0.5734, over 5183.00 frames. ], tot_loss[loss=3.656, NarTop10Accuracy=0.5846, over 6010.92 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,604 INFO [trainer.py:765] (5/8) Epoch 8, batch 1400, train_loss[loss=3.472, NarTop10Accuracy=0.6064, over 6097.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5817, over 6016.96 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (5/8) Epoch 8, batch 1500, train_loss[loss=3.772, NarTop10Accuracy=0.5648, over 5557.00 frames. ], tot_loss[loss=3.668, NarTop10Accuracy=0.5817, over 5963.85 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (5/8) Epoch 8, batch 1600, train_loss[loss=3.797, NarTop10Accuracy=0.5613, over 7082.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5817, over 5964.33 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (5/8) Epoch 8, batch 1700, train_loss[loss=3.569, NarTop10Accuracy=0.607, over 6238.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5796, over 5949.11 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,462 INFO [trainer.py:765] (5/8) Epoch 8, batch 1800, train_loss[loss=3.777, NarTop10Accuracy=0.5699, over 7325.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5817, over 6002.17 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,180 INFO [trainer.py:765] (5/8) Epoch 8, batch 1900, train_loss[loss=4.062, NarTop10Accuracy=0.4952, over 6418.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.581, over 6036.72 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:28:25,164 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (5/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27185MB +2024-08-06 08:28:35,795 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,981 INFO [trainer.py:765] (5/8) Epoch 8, batch 2000, train_loss[loss=3.934, NarTop10Accuracy=0.5407, over 5718.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.582, over 5996.43 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:29:18,484 INFO [trainer.py:765] (5/8) Epoch 8, batch 2100, train_loss[loss=3.581, NarTop10Accuracy=0.598, over 4047.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5818, over 5996.08 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 08:29:43,788 INFO [trainer.py:765] (5/8) Epoch 8, batch 2200, train_loss[loss=3.958, NarTop10Accuracy=0.5206, over 7193.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.5808, over 6035.33 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,132 INFO [trainer.py:765] (5/8) Epoch 8, batch 2300, train_loss[loss=3.531, NarTop10Accuracy=0.5986, over 5715.00 frames. ], tot_loss[loss=3.679, NarTop10Accuracy=0.5799, over 6058.09 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,790 INFO [trainer.py:765] (5/8) Epoch 8, batch 2400, train_loss[loss=3.431, NarTop10Accuracy=0.6309, over 5219.00 frames. ], tot_loss[loss=3.699, NarTop10Accuracy=0.5762, over 5885.88 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,138 INFO [trainer.py:765] (5/8) Epoch 8, batch 2500, train_loss[loss=3.708, NarTop10Accuracy=0.5843, over 5136.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5805, over 5536.44 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,350 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (5/8) Epoch 9, batch 100, train_loss[loss=3.94, NarTop10Accuracy=0.5289, over 7458.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.595, over 2374.67 frames. ], batch size: 31, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (5/8) Epoch 9, batch 200, train_loss[loss=3.285, NarTop10Accuracy=0.6559, over 6823.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5966, over 3873.34 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (5/8) Epoch 9, batch 300, train_loss[loss=3.534, NarTop10Accuracy=0.6131, over 7377.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5967, over 4679.51 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,964 INFO [trainer.py:765] (5/8) Epoch 9, batch 400, train_loss[loss=3.467, NarTop10Accuracy=0.6123, over 5028.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5979, over 5122.08 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (5/8) Epoch 9, batch 500, train_loss[loss=3.726, NarTop10Accuracy=0.568, over 6223.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6021, over 5416.99 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (5/8) Epoch 9, batch 600, train_loss[loss=3.445, NarTop10Accuracy=0.6365, over 5805.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.5997, over 5698.14 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (5/8) Epoch 9, batch 700, train_loss[loss=3.635, NarTop10Accuracy=0.5765, over 5060.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5973, over 5753.26 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (5/8) Epoch 9, batch 800, train_loss[loss=3.327, NarTop10Accuracy=0.6475, over 5089.00 frames. ], tot_loss[loss=3.621, NarTop10Accuracy=0.5927, over 5800.86 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,454 INFO [trainer.py:765] (5/8) Epoch 9, batch 900, train_loss[loss=3.304, NarTop10Accuracy=0.6521, over 6715.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5939, over 5825.89 frames. ], batch size: 14, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (5/8) Epoch 9, batch 1000, train_loss[loss=3.561, NarTop10Accuracy=0.6007, over 6340.00 frames. ], tot_loss[loss=3.615, NarTop10Accuracy=0.5933, over 5921.24 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,421 INFO [trainer.py:765] (5/8) Epoch 9, batch 1100, train_loss[loss=3.955, NarTop10Accuracy=0.5236, over 6836.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.5881, over 5943.03 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (5/8) Epoch 9, batch 1200, train_loss[loss=3.782, NarTop10Accuracy=0.5638, over 6987.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5886, over 5943.65 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (5/8) Epoch 9, batch 1300, train_loss[loss=3.69, NarTop10Accuracy=0.5829, over 5198.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5889, over 6014.34 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,117 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (5/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27185MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (5/8) Epoch 9, batch 1400, train_loss[loss=3.531, NarTop10Accuracy=0.6025, over 6134.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5905, over 6029.01 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (5/8) Epoch 9, batch 1500, train_loss[loss=3.891, NarTop10Accuracy=0.534, over 5606.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5887, over 5959.67 frames. ], batch size: 49, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (5/8) Epoch 9, batch 1600, train_loss[loss=3.639, NarTop10Accuracy=0.5832, over 7165.00 frames. ], tot_loss[loss=3.625, NarTop10Accuracy=0.5907, over 5946.40 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (5/8) Epoch 9, batch 1700, train_loss[loss=3.593, NarTop10Accuracy=0.5911, over 6159.00 frames. ], tot_loss[loss=3.633, NarTop10Accuracy=0.5893, over 5937.11 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,813 INFO [trainer.py:765] (5/8) Epoch 9, batch 1800, train_loss[loss=3.803, NarTop10Accuracy=0.5578, over 7210.00 frames. ], tot_loss[loss=3.628, NarTop10Accuracy=0.5909, over 5988.77 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,496 INFO [trainer.py:765] (5/8) Epoch 9, batch 1900, train_loss[loss=3.738, NarTop10Accuracy=0.5836, over 6492.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.59, over 6031.43 frames. ], batch size: 51, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (5/8) Epoch 9, batch 2000, train_loss[loss=4.081, NarTop10Accuracy=0.5063, over 5540.00 frames. ], tot_loss[loss=3.643, NarTop10Accuracy=0.5882, over 6011.24 frames. ], batch size: 49, lr: 9.31e-03 +2024-08-06 08:43:01,667 INFO [trainer.py:765] (5/8) Epoch 9, batch 2100, train_loss[loss=3.331, NarTop10Accuracy=0.6483, over 4793.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5888, over 6007.11 frames. ], batch size: 5, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (5/8) Epoch 9, batch 2200, train_loss[loss=3.522, NarTop10Accuracy=0.6043, over 7560.00 frames. ], tot_loss[loss=3.644, NarTop10Accuracy=0.5875, over 6035.33 frames. ], batch size: 32, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (5/8) Epoch 9, batch 2300, train_loss[loss=3.811, NarTop10Accuracy=0.5552, over 5813.00 frames. ], tot_loss[loss=3.656, NarTop10Accuracy=0.5852, over 6072.83 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (5/8) Epoch 9, batch 2400, train_loss[loss=3.325, NarTop10Accuracy=0.6564, over 5108.00 frames. ], tot_loss[loss=3.65, NarTop10Accuracy=0.5863, over 5876.21 frames. ], batch size: 7, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (5/8) Epoch 9, batch 2500, train_loss[loss=3.914, NarTop10Accuracy=0.5412, over 5075.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5894, over 5522.59 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:05,342 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 08:46:09,064 INFO [trainer.py:765] (5/8) Epoch 10, batch 100, train_loss[loss=3.416, NarTop10Accuracy=0.6382, over 7143.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6015, over 2366.27 frames. ], batch size: 30, lr: 8.75e-03 +2024-08-06 08:46:44,075 INFO [trainer.py:765] (5/8) Epoch 10, batch 200, train_loss[loss=3.359, NarTop10Accuracy=0.645, over 6863.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6032, over 3863.93 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (5/8) Epoch 10, batch 300, train_loss[loss=3.66, NarTop10Accuracy=0.5792, over 7255.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6043, over 4676.26 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,119 INFO [trainer.py:765] (5/8) Epoch 10, batch 400, train_loss[loss=3.955, NarTop10Accuracy=0.5304, over 5036.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6019, over 5124.55 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,371 INFO [trainer.py:765] (5/8) Epoch 10, batch 500, train_loss[loss=3.541, NarTop10Accuracy=0.6007, over 6094.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6023, over 5403.50 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,460 INFO [trainer.py:765] (5/8) Epoch 10, batch 600, train_loss[loss=3.316, NarTop10Accuracy=0.6624, over 5772.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6021, over 5671.86 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (5/8) Epoch 10, batch 700, train_loss[loss=3.533, NarTop10Accuracy=0.6036, over 5010.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.6, over 5740.41 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,164 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (5/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27185MB +2024-08-06 08:50:01,724 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,800 INFO [trainer.py:765] (5/8) Epoch 10, batch 800, train_loss[loss=3.471, NarTop10Accuracy=0.6258, over 5186.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5991, over 5809.61 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,889 INFO [trainer.py:765] (5/8) Epoch 10, batch 900, train_loss[loss=3.429, NarTop10Accuracy=0.6358, over 6338.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.601, over 5803.80 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,459 INFO [trainer.py:765] (5/8) Epoch 10, batch 1000, train_loss[loss=3.824, NarTop10Accuracy=0.5543, over 6706.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5972, over 5918.75 frames. ], batch size: 14, lr: 8.59e-03 +2024-08-06 08:51:57,361 INFO [trainer.py:765] (5/8) Epoch 10, batch 1100, train_loss[loss=3.303, NarTop10Accuracy=0.6419, over 6946.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5956, over 5943.33 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,047 INFO [trainer.py:765] (5/8) Epoch 10, batch 1200, train_loss[loss=3.59, NarTop10Accuracy=0.5875, over 7014.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.595, over 5929.78 frames. ], batch size: 30, lr: 8.56e-03 +2024-08-06 08:53:06,606 INFO [trainer.py:765] (5/8) Epoch 10, batch 1300, train_loss[loss=3.663, NarTop10Accuracy=0.5859, over 5042.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5951, over 6008.95 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,879 INFO [trainer.py:765] (5/8) Epoch 10, batch 1400, train_loss[loss=3.413, NarTop10Accuracy=0.6426, over 6037.00 frames. ], tot_loss[loss=3.613, NarTop10Accuracy=0.5943, over 6029.12 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,500 INFO [trainer.py:765] (5/8) Epoch 10, batch 1500, train_loss[loss=3.67, NarTop10Accuracy=0.5749, over 5742.00 frames. ], tot_loss[loss=3.598, NarTop10Accuracy=0.5974, over 5961.69 frames. ], batch size: 48, lr: 8.51e-03 +2024-08-06 08:54:45,525 INFO [trainer.py:765] (5/8) Epoch 10, batch 1600, train_loss[loss=3.55, NarTop10Accuracy=0.6071, over 6992.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.596, over 5941.23 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,299 INFO [trainer.py:765] (5/8) Epoch 10, batch 1700, train_loss[loss=3.329, NarTop10Accuracy=0.6467, over 6679.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.595, over 5925.21 frames. 
], batch size: 14, lr: 8.48e-03 +2024-08-06 08:55:41,988 INFO [trainer.py:765] (5/8) Epoch 10, batch 1800, train_loss[loss=3.561, NarTop10Accuracy=0.6093, over 7116.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5959, over 5996.68 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,571 INFO [trainer.py:765] (5/8) Epoch 10, batch 1900, train_loss[loss=3.984, NarTop10Accuracy=0.5325, over 6067.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5961, over 6042.77 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,286 INFO [trainer.py:765] (5/8) Epoch 10, batch 2000, train_loss[loss=3.588, NarTop10Accuracy=0.6037, over 6299.00 frames. ], tot_loss[loss=3.597, NarTop10Accuracy=0.5969, over 6010.58 frames. ], batch size: 51, lr: 8.43e-03 +2024-08-06 08:56:59,750 INFO [trainer.py:765] (5/8) Epoch 10, batch 2100, train_loss[loss=3.431, NarTop10Accuracy=0.6311, over 4738.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5951, over 6003.74 frames. ], batch size: 5, lr: 8.41e-03 +2024-08-06 08:57:25,279 INFO [trainer.py:765] (5/8) Epoch 10, batch 2200, train_loss[loss=3.705, NarTop10Accuracy=0.579, over 6966.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5955, over 6055.66 frames. ], batch size: 30, lr: 8.40e-03 +2024-08-06 08:57:50,681 INFO [trainer.py:765] (5/8) Epoch 10, batch 2300, train_loss[loss=3.461, NarTop10Accuracy=0.6191, over 5703.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5954, over 6079.24 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,343 INFO [trainer.py:765] (5/8) Epoch 10, batch 2400, train_loss[loss=3.557, NarTop10Accuracy=0.6127, over 5092.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5947, over 5889.90 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,808 INFO [trainer.py:765] (5/8) Epoch 10, batch 2500, train_loss[loss=3.463, NarTop10Accuracy=0.6219, over 4973.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5976, over 5551.39 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:59:00,278 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:00:03,680 INFO [trainer.py:765] (5/8) Epoch 11, batch 100, train_loss[loss=3.395, NarTop10Accuracy=0.638, over 6953.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.612, over 2365.98 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27597MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (5/8) Epoch 11, batch 200, train_loss[loss=3.666, NarTop10Accuracy=0.5817, over 7034.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6126, over 3871.04 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (5/8) Epoch 11, batch 300, train_loss[loss=3.521, NarTop10Accuracy=0.6172, over 7032.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6115, over 4677.31 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,534 INFO [trainer.py:765] (5/8) Epoch 11, batch 400, train_loss[loss=3.446, NarTop10Accuracy=0.6231, over 5085.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6138, over 5130.72 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,238 INFO [trainer.py:765] (5/8) Epoch 11, batch 500, train_loss[loss=3.373, NarTop10Accuracy=0.6505, over 6168.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6126, over 5394.01 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,742 INFO [trainer.py:765] (5/8) Epoch 11, batch 600, train_loss[loss=3.755, NarTop10Accuracy=0.5665, over 5820.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6081, over 5673.82 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,236 INFO [trainer.py:765] (5/8) Epoch 11, batch 700, train_loss[loss=3.457, NarTop10Accuracy=0.6021, over 4986.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6083, over 5742.84 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (5/8) Epoch 11, batch 800, train_loss[loss=3.647, NarTop10Accuracy=0.5941, over 5087.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.605, over 5784.32 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,083 INFO [trainer.py:765] (5/8) Epoch 11, batch 900, train_loss[loss=3.391, NarTop10Accuracy=0.6253, over 6246.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6097, over 5818.20 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 09:05:27,012 INFO [trainer.py:765] (5/8) Epoch 11, batch 1000, train_loss[loss=3.588, NarTop10Accuracy=0.5872, over 6718.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6078, over 5928.02 frames. ], batch size: 14, lr: 7.83e-03 +2024-08-06 09:06:00,350 INFO [trainer.py:765] (5/8) Epoch 11, batch 1100, train_loss[loss=3.447, NarTop10Accuracy=0.6217, over 6872.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6049, over 5943.25 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,947 INFO [trainer.py:765] (5/8) Epoch 11, batch 1200, train_loss[loss=3.672, NarTop10Accuracy=0.579, over 7232.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6041, over 5950.09 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,493 INFO [trainer.py:765] (5/8) Epoch 11, batch 1300, train_loss[loss=3.256, NarTop10Accuracy=0.6608, over 4361.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.603, over 6018.12 frames. ], batch size: 5, lr: 7.79e-03 +2024-08-06 09:07:47,628 INFO [trainer.py:765] (5/8) Epoch 11, batch 1400, train_loss[loss=3.525, NarTop10Accuracy=0.6145, over 6161.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6013, over 6033.77 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,987 INFO [trainer.py:765] (5/8) Epoch 11, batch 1500, train_loss[loss=3.671, NarTop10Accuracy=0.5808, over 5900.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6012, over 5957.89 frames. ], batch size: 49, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (5/8) Epoch 11, batch 1600, train_loss[loss=3.326, NarTop10Accuracy=0.6496, over 7141.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6013, over 5943.99 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (5/8) Epoch 11, batch 1700, train_loss[loss=3.644, NarTop10Accuracy=0.5937, over 6241.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6007, over 5926.93 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,733 INFO [trainer.py:765] (5/8) Epoch 11, batch 1800, train_loss[loss=3.511, NarTop10Accuracy=0.6043, over 7303.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6002, over 5986.44 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,343 INFO [trainer.py:765] (5/8) Epoch 11, batch 1900, train_loss[loss=3.765, NarTop10Accuracy=0.5582, over 5957.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5981, over 6027.64 frames. ], batch size: 48, lr: 7.70e-03 +2024-08-06 09:10:33,040 INFO [trainer.py:765] (5/8) Epoch 11, batch 2000, train_loss[loss=3.546, NarTop10Accuracy=0.6154, over 6638.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.5995, over 6013.45 frames. ], batch size: 48, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (5/8) Epoch 11, batch 2100, train_loss[loss=3.465, NarTop10Accuracy=0.6274, over 4850.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6022, over 5998.24 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,710 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (5/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27597MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,518 INFO [trainer.py:765] (5/8) Epoch 11, batch 2200, train_loss[loss=3.639, NarTop10Accuracy=0.598, over 7098.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6014, over 6029.98 frames. ], batch size: 30, lr: 7.66e-03 +2024-08-06 09:11:59,939 INFO [trainer.py:765] (5/8) Epoch 11, batch 2300, train_loss[loss=3.472, NarTop10Accuracy=0.6344, over 5701.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6007, over 6057.85 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,695 INFO [trainer.py:765] (5/8) Epoch 11, batch 2400, train_loss[loss=3.694, NarTop10Accuracy=0.5796, over 5237.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5977, over 5882.10 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (5/8) Epoch 11, batch 2500, train_loss[loss=3.675, NarTop10Accuracy=0.5812, over 5120.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.603, over 5542.28 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,018 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:14:12,278 INFO [trainer.py:765] (5/8) Epoch 12, batch 100, train_loss[loss=3.465, NarTop10Accuracy=0.6334, over 7280.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.613, over 2371.53 frames. ], batch size: 30, lr: 7.29e-03 +2024-08-06 09:14:48,096 INFO [trainer.py:765] (5/8) Epoch 12, batch 200, train_loss[loss=3.424, NarTop10Accuracy=0.6384, over 6858.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6174, over 3872.48 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (5/8) Epoch 12, batch 300, train_loss[loss=3.539, NarTop10Accuracy=0.6112, over 7125.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6181, over 4667.41 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (5/8) Epoch 12, batch 400, train_loss[loss=3.677, NarTop10Accuracy=0.5846, over 5090.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.614, over 5125.37 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (5/8) Epoch 12, batch 500, train_loss[loss=3.35, NarTop10Accuracy=0.6319, over 6110.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6144, over 5411.81 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (5/8) Epoch 12, batch 600, train_loss[loss=3.464, NarTop10Accuracy=0.6229, over 5666.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6118, over 5681.65 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,318 INFO [trainer.py:765] (5/8) Epoch 12, batch 700, train_loss[loss=3.434, NarTop10Accuracy=0.616, over 5199.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6119, over 5760.19 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,752 INFO [trainer.py:765] (5/8) Epoch 12, batch 800, train_loss[loss=3.372, NarTop10Accuracy=0.6335, over 5110.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6111, over 5803.83 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (5/8) Epoch 12, batch 900, train_loss[loss=3.811, NarTop10Accuracy=0.5587, over 6209.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6094, over 5821.64 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (5/8) Epoch 12, batch 1000, train_loss[loss=3.474, NarTop10Accuracy=0.6107, over 6131.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6107, over 5910.37 frames. ], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,426 INFO [trainer.py:765] (5/8) Epoch 12, batch 1100, train_loss[loss=3.629, NarTop10Accuracy=0.6036, over 6902.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6109, over 5937.21 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,442 INFO [trainer.py:765] (5/8) Epoch 12, batch 1200, train_loss[loss=3.437, NarTop10Accuracy=0.6324, over 7330.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.608, over 5940.46 frames. ], batch size: 31, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (5/8) Epoch 12, batch 1300, train_loss[loss=3.488, NarTop10Accuracy=0.6124, over 5215.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6074, over 6019.93 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (5/8) Epoch 12, batch 1400, train_loss[loss=3.306, NarTop10Accuracy=0.6507, over 6198.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6061, over 6058.82 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,919 INFO [trainer.py:765] (5/8) Epoch 12, batch 1500, train_loss[loss=3.567, NarTop10Accuracy=0.6049, over 6131.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6061, over 5994.31 frames. ], batch size: 48, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (5/8) Epoch 12, batch 1600, train_loss[loss=3.656, NarTop10Accuracy=0.5896, over 7049.00 frames. ], tot_loss[loss=3.565, NarTop10Accuracy=0.6043, over 5973.55 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,859 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (5/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,890 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27597MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,785 INFO [trainer.py:765] (5/8) Epoch 12, batch 1700, train_loss[loss=3.382, NarTop10Accuracy=0.6354, over 6630.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6039, over 5951.22 frames. 
], batch size: 14, lr: 7.10e-03 +2024-08-06 09:23:41,387 INFO [trainer.py:765] (5/8) Epoch 12, batch 1800, train_loss[loss=3.459, NarTop10Accuracy=0.6267, over 6944.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6049, over 5997.37 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (5/8) Epoch 12, batch 1900, train_loss[loss=3.571, NarTop10Accuracy=0.6131, over 6081.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6021, over 6036.48 frames. ], batch size: 49, lr: 7.08e-03 +2024-08-06 09:24:33,619 INFO [trainer.py:765] (5/8) Epoch 12, batch 2000, train_loss[loss=3.631, NarTop10Accuracy=0.5925, over 7095.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6018, over 6011.24 frames. ], batch size: 49, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (5/8) Epoch 12, batch 2100, train_loss[loss=3.712, NarTop10Accuracy=0.5718, over 4899.00 frames. ], tot_loss[loss=3.567, NarTop10Accuracy=0.603, over 5985.88 frames. ], batch size: 5, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (5/8) Epoch 12, batch 2200, train_loss[loss=3.343, NarTop10Accuracy=0.6489, over 7395.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6032, over 6029.83 frames. ], batch size: 31, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (5/8) Epoch 12, batch 2300, train_loss[loss=3.753, NarTop10Accuracy=0.5733, over 5742.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6018, over 6052.72 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (5/8) Epoch 12, batch 2400, train_loss[loss=3.428, NarTop10Accuracy=0.6227, over 5124.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6013, over 5876.37 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,154 INFO [trainer.py:765] (5/8) Epoch 12, batch 2500, train_loss[loss=3.543, NarTop10Accuracy=0.5989, over 4910.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6035, over 5545.99 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,445 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (5/8) Epoch 13, batch 100, train_loss[loss=3.513, NarTop10Accuracy=0.6031, over 7205.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6169, over 2377.07 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (5/8) Epoch 13, batch 200, train_loss[loss=3.263, NarTop10Accuracy=0.6621, over 6937.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6185, over 3890.39 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (5/8) Epoch 13, batch 300, train_loss[loss=3.49, NarTop10Accuracy=0.627, over 7307.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6189, over 4686.56 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (5/8) Epoch 13, batch 400, train_loss[loss=3.721, NarTop10Accuracy=0.5771, over 5666.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6211, over 5143.44 frames. ], batch size: 8, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (5/8) Epoch 13, batch 500, train_loss[loss=3.847, NarTop10Accuracy=0.546, over 6223.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6206, over 5420.93 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (5/8) Epoch 13, batch 600, train_loss[loss=3.349, NarTop10Accuracy=0.6545, over 5753.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6204, over 5682.97 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (5/8) Epoch 13, batch 700, train_loss[loss=3.603, NarTop10Accuracy=0.6136, over 5082.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6178, over 5759.23 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (5/8) Epoch 13, batch 800, train_loss[loss=3.342, NarTop10Accuracy=0.6564, over 5079.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6155, over 5793.04 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (5/8) Epoch 13, batch 900, train_loss[loss=3.382, NarTop10Accuracy=0.638, over 6320.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6149, over 5822.76 frames. ], batch size: 13, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (5/8) Epoch 13, batch 1000, train_loss[loss=3.462, NarTop10Accuracy=0.6182, over 6222.00 frames. ], tot_loss[loss=3.52, NarTop10Accuracy=0.6131, over 5913.53 frames. ], batch size: 13, lr: 6.63e-03 +2024-08-06 09:33:14,218 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (5/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,526 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27597MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (5/8) Epoch 13, batch 1100, train_loss[loss=3.603, NarTop10Accuracy=0.6002, over 6728.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6101, over 5955.14 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (5/8) Epoch 13, batch 1200, train_loss[loss=3.567, NarTop10Accuracy=0.6067, over 7378.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6109, over 5947.93 frames. ], batch size: 31, lr: 6.61e-03 +2024-08-06 09:35:05,084 INFO [trainer.py:765] (5/8) Epoch 13, batch 1300, train_loss[loss=3.547, NarTop10Accuracy=0.6, over 5225.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6111, over 6018.34 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,404 INFO [trainer.py:765] (5/8) Epoch 13, batch 1400, train_loss[loss=3.287, NarTop10Accuracy=0.6525, over 6116.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6091, over 6037.23 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (5/8) Epoch 13, batch 1500, train_loss[loss=3.796, NarTop10Accuracy=0.5562, over 5836.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.607, over 5981.12 frames. ], batch size: 48, lr: 6.58e-03 +2024-08-06 09:36:35,388 INFO [trainer.py:765] (5/8) Epoch 13, batch 1600, train_loss[loss=3.618, NarTop10Accuracy=0.5921, over 7050.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6079, over 5962.11 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (5/8) Epoch 13, batch 1700, train_loss[loss=3.465, NarTop10Accuracy=0.6328, over 6132.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6075, over 5944.07 frames. ], batch size: 13, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (5/8) Epoch 13, batch 1800, train_loss[loss=3.496, NarTop10Accuracy=0.6229, over 7158.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6105, over 6013.11 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (5/8) Epoch 13, batch 1900, train_loss[loss=3.447, NarTop10Accuracy=0.6355, over 5787.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6091, over 6043.80 frames. ], batch size: 48, lr: 6.54e-03 +2024-08-06 09:38:21,122 INFO [trainer.py:765] (5/8) Epoch 13, batch 2000, train_loss[loss=3.578, NarTop10Accuracy=0.5985, over 6150.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6079, over 6008.90 frames. ], batch size: 49, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (5/8) Epoch 13, batch 2100, train_loss[loss=3.579, NarTop10Accuracy=0.5973, over 3788.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6103, over 5993.06 frames. ], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (5/8) Epoch 13, batch 2200, train_loss[loss=3.456, NarTop10Accuracy=0.6217, over 7002.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6101, over 6021.30 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,617 INFO [trainer.py:765] (5/8) Epoch 13, batch 2300, train_loss[loss=3.581, NarTop10Accuracy=0.5942, over 5687.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6074, over 6059.48 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (5/8) Epoch 13, batch 2400, train_loss[loss=3.408, NarTop10Accuracy=0.6304, over 5111.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6054, over 5867.77 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,767 INFO [trainer.py:765] (5/8) Epoch 13, batch 2500, train_loss[loss=3.327, NarTop10Accuracy=0.6562, over 4997.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.61, over 5532.17 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:50,413 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (5/8) Epoch 14, batch 100, train_loss[loss=3.382, NarTop10Accuracy=0.6385, over 7277.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6231, over 2375.56 frames. ], batch size: 31, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (5/8) Epoch 14, batch 200, train_loss[loss=3.566, NarTop10Accuracy=0.5978, over 6811.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6252, over 3868.81 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (5/8) Epoch 14, batch 300, train_loss[loss=3.727, NarTop10Accuracy=0.574, over 7141.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6213, over 4675.52 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (5/8) Epoch 14, batch 400, train_loss[loss=3.299, NarTop10Accuracy=0.6673, over 5131.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.62, over 5134.10 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (5/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,652 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 27597MB +2024-08-06 09:43:54,211 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (5/8) Epoch 14, batch 500, train_loss[loss=3.617, NarTop10Accuracy=0.5965, over 6036.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6213, over 5423.69 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (5/8) Epoch 14, batch 600, train_loss[loss=3.548, NarTop10Accuracy=0.6005, over 5780.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6216, over 5695.10 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,804 INFO [trainer.py:765] (5/8) Epoch 14, batch 700, train_loss[loss=3.679, NarTop10Accuracy=0.5884, over 5011.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6224, over 5757.79 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (5/8) Epoch 14, batch 800, train_loss[loss=3.492, NarTop10Accuracy=0.6252, over 4952.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6193, over 5792.86 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (5/8) Epoch 14, batch 900, train_loss[loss=3.674, NarTop10Accuracy=0.57, over 6406.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6198, over 5819.34 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (5/8) Epoch 14, batch 1000, train_loss[loss=3.509, NarTop10Accuracy=0.6245, over 6312.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6202, over 5918.55 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (5/8) Epoch 14, batch 1100, train_loss[loss=3.293, NarTop10Accuracy=0.6601, over 6818.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6176, over 5956.81 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (5/8) Epoch 14, batch 1200, train_loss[loss=3.346, NarTop10Accuracy=0.6583, over 7384.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6186, over 5959.71 frames. ], batch size: 31, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (5/8) Epoch 14, batch 1300, train_loss[loss=3.418, NarTop10Accuracy=0.6259, over 5087.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6169, over 6017.98 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (5/8) Epoch 14, batch 1400, train_loss[loss=3.744, NarTop10Accuracy=0.5704, over 6064.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6135, over 6038.30 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (5/8) Epoch 14, batch 1500, train_loss[loss=3.554, NarTop10Accuracy=0.6085, over 6104.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6136, over 5969.95 frames. ], batch size: 54, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (5/8) Epoch 14, batch 1600, train_loss[loss=3.44, NarTop10Accuracy=0.6291, over 7190.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6149, over 5945.87 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,378 INFO [trainer.py:765] (5/8) Epoch 14, batch 1700, train_loss[loss=3.395, NarTop10Accuracy=0.6442, over 6262.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6156, over 5935.43 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (5/8) Epoch 14, batch 1800, train_loss[loss=3.699, NarTop10Accuracy=0.5866, over 7043.00 frames. ], tot_loss[loss=3.51, NarTop10Accuracy=0.6155, over 6001.73 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (5/8) Epoch 14, batch 1900, train_loss[loss=3.86, NarTop10Accuracy=0.5453, over 6135.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6117, over 6027.46 frames. 
], batch size: 49, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (5/8) Epoch 14, batch 2000, train_loss[loss=3.681, NarTop10Accuracy=0.5893, over 5882.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6114, over 6018.56 frames. ], batch size: 49, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (5/8) Epoch 14, batch 2100, train_loss[loss=3.372, NarTop10Accuracy=0.6226, over 3902.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6139, over 5991.53 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,480 INFO [trainer.py:765] (5/8) Epoch 14, batch 2200, train_loss[loss=3.348, NarTop10Accuracy=0.6552, over 7032.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6138, over 6037.61 frames. ], batch size: 30, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (5/8) Epoch 14, batch 2300, train_loss[loss=3.516, NarTop10Accuracy=0.6188, over 5803.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6108, over 6062.60 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (5/8) Epoch 14, batch 2400, train_loss[loss=3.535, NarTop10Accuracy=0.591, over 5203.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.609, over 5873.30 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,820 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (5/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29161MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (5/8) Epoch 14, batch 2500, train_loss[loss=3.628, NarTop10Accuracy=0.5763, over 5060.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6144, over 5537.07 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,751 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (5/8) Epoch 15, batch 100, train_loss[loss=3.515, NarTop10Accuracy=0.6078, over 7430.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.627, over 2372.53 frames. ], batch size: 30, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (5/8) Epoch 15, batch 200, train_loss[loss=3.516, NarTop10Accuracy=0.6255, over 6786.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6264, over 3861.32 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,653 INFO [trainer.py:765] (5/8) Epoch 15, batch 300, train_loss[loss=3.262, NarTop10Accuracy=0.6623, over 7103.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6286, over 4664.81 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,464 INFO [trainer.py:765] (5/8) Epoch 15, batch 400, train_loss[loss=3.703, NarTop10Accuracy=0.5808, over 5302.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6283, over 5117.97 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (5/8) Epoch 15, batch 500, train_loss[loss=3.511, NarTop10Accuracy=0.6214, over 6102.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6258, over 5407.22 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (5/8) Epoch 15, batch 600, train_loss[loss=3.606, NarTop10Accuracy=0.6042, over 5807.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6229, over 5683.87 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,062 INFO [trainer.py:765] (5/8) Epoch 15, batch 700, train_loss[loss=3.219, NarTop10Accuracy=0.6734, over 4997.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6215, over 5750.36 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (5/8) Epoch 15, batch 800, train_loss[loss=3.623, NarTop10Accuracy=0.5839, over 4974.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6221, over 5798.30 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (5/8) Epoch 15, batch 900, train_loss[loss=3.255, NarTop10Accuracy=0.6578, over 6666.00 frames. ], tot_loss[loss=3.471, NarTop10Accuracy=0.6229, over 5814.30 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 10:01:05,538 INFO [trainer.py:765] (5/8) Epoch 15, batch 1000, train_loss[loss=3.346, NarTop10Accuracy=0.6607, over 6289.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6234, over 5927.54 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (5/8) Epoch 15, batch 1100, train_loss[loss=3.525, NarTop10Accuracy=0.6075, over 6958.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6197, over 5962.99 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,757 INFO [trainer.py:765] (5/8) Epoch 15, batch 1200, train_loss[loss=3.838, NarTop10Accuracy=0.5428, over 7139.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6196, over 5962.50 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (5/8) Epoch 15, batch 1300, train_loss[loss=3.441, NarTop10Accuracy=0.6363, over 5045.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6188, over 6022.47 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,436 INFO [trainer.py:765] (5/8) Epoch 15, batch 1400, train_loss[loss=3.546, NarTop10Accuracy=0.6082, over 6066.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6161, over 6040.42 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,042 INFO [trainer.py:765] (5/8) Epoch 15, batch 1500, train_loss[loss=3.524, NarTop10Accuracy=0.6141, over 5969.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6179, over 5969.51 frames. ], batch size: 49, lr: 5.71e-03 +2024-08-06 10:04:27,106 INFO [trainer.py:765] (5/8) Epoch 15, batch 1600, train_loss[loss=3.654, NarTop10Accuracy=0.583, over 7121.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6211, over 5931.77 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,908 INFO [trainer.py:765] (5/8) Epoch 15, batch 1700, train_loss[loss=3.604, NarTop10Accuracy=0.5934, over 6160.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6209, over 5939.44 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,730 INFO [trainer.py:765] (5/8) Epoch 15, batch 1800, train_loss[loss=3.857, NarTop10Accuracy=0.5385, over 6999.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6169, over 6007.58 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,267 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (5/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29161MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,568 INFO [trainer.py:765] (5/8) Epoch 15, batch 1900, train_loss[loss=3.662, NarTop10Accuracy=0.5843, over 5883.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6157, over 6061.30 frames. ], batch size: 48, lr: 5.68e-03 +2024-08-06 10:06:23,371 INFO [trainer.py:765] (5/8) Epoch 15, batch 2000, train_loss[loss=3.577, NarTop10Accuracy=0.6041, over 6218.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6164, over 6045.32 frames. ], batch size: 50, lr: 5.67e-03 +2024-08-06 10:06:48,759 INFO [trainer.py:765] (5/8) Epoch 15, batch 2100, train_loss[loss=3.148, NarTop10Accuracy=0.6932, over 3928.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6168, over 6011.26 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,171 INFO [trainer.py:765] (5/8) Epoch 15, batch 2200, train_loss[loss=3.454, NarTop10Accuracy=0.6348, over 7317.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6196, over 6031.47 frames. ], batch size: 31, lr: 5.65e-03 +2024-08-06 10:07:39,629 INFO [trainer.py:765] (5/8) Epoch 15, batch 2300, train_loss[loss=3.317, NarTop10Accuracy=0.6571, over 5733.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6184, over 6054.39 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (5/8) Epoch 15, batch 2400, train_loss[loss=3.458, NarTop10Accuracy=0.614, over 5124.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6181, over 5869.63 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,714 INFO [trainer.py:765] (5/8) Epoch 15, batch 2500, train_loss[loss=3.505, NarTop10Accuracy=0.6132, over 5042.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6207, over 5529.70 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,044 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:09:44,182 INFO [trainer.py:765] (5/8) Epoch 16, batch 100, train_loss[loss=3.508, NarTop10Accuracy=0.6177, over 7111.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6299, over 2374.54 frames. ], batch size: 30, lr: 5.44e-03 +2024-08-06 10:10:23,205 INFO [trainer.py:765] (5/8) Epoch 16, batch 200, train_loss[loss=3.357, NarTop10Accuracy=0.643, over 6929.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6314, over 3866.56 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,839 INFO [trainer.py:765] (5/8) Epoch 16, batch 300, train_loss[loss=3.307, NarTop10Accuracy=0.6638, over 7141.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6274, over 4678.88 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,593 INFO [trainer.py:765] (5/8) Epoch 16, batch 400, train_loss[loss=3.391, NarTop10Accuracy=0.6351, over 5066.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.628, over 5127.43 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,296 INFO [trainer.py:765] (5/8) Epoch 16, batch 500, train_loss[loss=3.519, NarTop10Accuracy=0.6119, over 5977.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6275, over 5412.02 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,338 INFO [trainer.py:765] (5/8) Epoch 16, batch 600, train_loss[loss=3.37, NarTop10Accuracy=0.6421, over 5880.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6273, over 5674.35 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,948 INFO [trainer.py:765] (5/8) Epoch 16, batch 700, train_loss[loss=3.029, NarTop10Accuracy=0.7074, over 5044.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6261, over 5741.76 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,283 INFO [trainer.py:765] (5/8) Epoch 16, batch 800, train_loss[loss=3.63, NarTop10Accuracy=0.5853, over 5211.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6266, over 5797.08 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,293 INFO [trainer.py:765] (5/8) Epoch 16, batch 900, train_loss[loss=3.762, NarTop10Accuracy=0.5646, over 6353.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6268, over 5824.27 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,058 INFO [trainer.py:765] (5/8) Epoch 16, batch 1000, train_loss[loss=3.573, NarTop10Accuracy=0.599, over 6230.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6228, over 5921.12 frames. ], batch size: 13, lr: 5.38e-03 +2024-08-06 10:15:30,507 INFO [trainer.py:765] (5/8) Epoch 16, batch 1100, train_loss[loss=3.394, NarTop10Accuracy=0.6398, over 6845.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6209, over 5945.28 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,382 INFO [trainer.py:765] (5/8) Epoch 16, batch 1200, train_loss[loss=3.543, NarTop10Accuracy=0.606, over 7258.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6223, over 5950.75 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 10:16:39,395 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (5/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29161MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,042 INFO [trainer.py:765] (5/8) Epoch 16, batch 1300, train_loss[loss=3.827, NarTop10Accuracy=0.5542, over 5040.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6229, over 6009.99 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,376 INFO [trainer.py:765] (5/8) Epoch 16, batch 1400, train_loss[loss=3.346, NarTop10Accuracy=0.6518, over 6130.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6224, over 6028.66 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,354 INFO [trainer.py:765] (5/8) Epoch 16, batch 1500, train_loss[loss=3.493, NarTop10Accuracy=0.6238, over 5911.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.621, over 5970.08 frames. ], batch size: 49, lr: 5.35e-03 +2024-08-06 10:18:30,469 INFO [trainer.py:765] (5/8) Epoch 16, batch 1600, train_loss[loss=3.766, NarTop10Accuracy=0.5586, over 7252.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6191, over 5949.39 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,272 INFO [trainer.py:765] (5/8) Epoch 16, batch 1700, train_loss[loss=3.455, NarTop10Accuracy=0.6178, over 6269.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6205, over 5929.78 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,979 INFO [trainer.py:765] (5/8) Epoch 16, batch 1800, train_loss[loss=3.838, NarTop10Accuracy=0.5532, over 7108.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6201, over 5991.87 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,772 INFO [trainer.py:765] (5/8) Epoch 16, batch 1900, train_loss[loss=3.675, NarTop10Accuracy=0.5847, over 6100.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6203, over 6035.91 frames. ], batch size: 48, lr: 5.32e-03 +2024-08-06 10:20:16,602 INFO [trainer.py:765] (5/8) Epoch 16, batch 2000, train_loss[loss=3.5, NarTop10Accuracy=0.6237, over 5921.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6181, over 6013.96 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (5/8) Epoch 16, batch 2100, train_loss[loss=3.675, NarTop10Accuracy=0.5689, over 3890.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6147, over 5997.51 frames. ], batch size: 4, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (5/8) Epoch 16, batch 2200, train_loss[loss=3.384, NarTop10Accuracy=0.6482, over 7361.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6158, over 6033.49 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:36,082 INFO [trainer.py:765] (5/8) Epoch 16, batch 2300, train_loss[loss=3.648, NarTop10Accuracy=0.5878, over 5810.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6146, over 6045.63 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,907 INFO [trainer.py:765] (5/8) Epoch 16, batch 2400, train_loss[loss=3.495, NarTop10Accuracy=0.6203, over 5083.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.615, over 5868.15 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,290 INFO [trainer.py:765] (5/8) Epoch 16, batch 2500, train_loss[loss=3.297, NarTop10Accuracy=0.6742, over 5180.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.62, over 5524.00 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,548 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:23:45,726 INFO [trainer.py:765] (5/8) Epoch 17, batch 100, train_loss[loss=3.413, NarTop10Accuracy=0.6393, over 7413.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.631, over 2371.07 frames. ], batch size: 31, lr: 5.12e-03 +2024-08-06 10:24:19,032 INFO [trainer.py:765] (5/8) Epoch 17, batch 200, train_loss[loss=3.189, NarTop10Accuracy=0.685, over 7100.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6337, over 3880.84 frames. ], batch size: 18, lr: 5.11e-03 +2024-08-06 10:24:53,439 INFO [trainer.py:765] (5/8) Epoch 17, batch 300, train_loss[loss=3.677, NarTop10Accuracy=0.5797, over 7079.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6317, over 4669.75 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,012 INFO [trainer.py:765] (5/8) Epoch 17, batch 400, train_loss[loss=3.688, NarTop10Accuracy=0.5876, over 5102.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6321, over 5107.31 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,605 INFO [trainer.py:765] (5/8) Epoch 17, batch 500, train_loss[loss=3.197, NarTop10Accuracy=0.6717, over 6564.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6305, over 5403.08 frames. ], batch size: 12, lr: 5.09e-03 +2024-08-06 10:26:29,754 INFO [trainer.py:765] (5/8) Epoch 17, batch 600, train_loss[loss=3.611, NarTop10Accuracy=0.5896, over 5919.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6311, over 5673.60 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (5/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29161MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,072 INFO [trainer.py:765] (5/8) Epoch 17, batch 700, train_loss[loss=3.444, NarTop10Accuracy=0.6406, over 5012.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6276, over 5748.21 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (5/8) Epoch 17, batch 800, train_loss[loss=3.517, NarTop10Accuracy=0.6213, over 4362.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6284, over 5785.80 frames. ], batch size: 5, lr: 5.07e-03 +2024-08-06 10:28:24,838 INFO [trainer.py:765] (5/8) Epoch 17, batch 900, train_loss[loss=3.207, NarTop10Accuracy=0.6812, over 6135.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6276, over 5806.78 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,683 INFO [trainer.py:765] (5/8) Epoch 17, batch 1000, train_loss[loss=3.439, NarTop10Accuracy=0.6273, over 6223.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6276, over 5911.50 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (5/8) Epoch 17, batch 1100, train_loss[loss=3.26, NarTop10Accuracy=0.6717, over 6732.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6249, over 5954.35 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,241 INFO [trainer.py:765] (5/8) Epoch 17, batch 1200, train_loss[loss=3.531, NarTop10Accuracy=0.6146, over 7396.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.626, over 5942.52 frames. ], batch size: 30, lr: 5.05e-03 +2024-08-06 10:30:47,102 INFO [trainer.py:765] (5/8) Epoch 17, batch 1300, train_loss[loss=3.458, NarTop10Accuracy=0.6306, over 4986.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6265, over 6016.23 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,893 INFO [trainer.py:765] (5/8) Epoch 17, batch 1400, train_loss[loss=3.435, NarTop10Accuracy=0.6388, over 6213.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6263, over 6031.98 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (5/8) Epoch 17, batch 1500, train_loss[loss=3.52, NarTop10Accuracy=0.623, over 6330.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6266, over 5973.17 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,401 INFO [trainer.py:765] (5/8) Epoch 17, batch 1600, train_loss[loss=3.473, NarTop10Accuracy=0.6263, over 7347.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6253, over 5963.15 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,394 INFO [trainer.py:765] (5/8) Epoch 17, batch 1700, train_loss[loss=3.579, NarTop10Accuracy=0.5997, over 6264.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6201, over 5949.26 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,036 INFO [trainer.py:765] (5/8) Epoch 17, batch 1800, train_loss[loss=3.691, NarTop10Accuracy=0.5756, over 7084.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6181, over 6004.70 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,597 INFO [trainer.py:765] (5/8) Epoch 17, batch 1900, train_loss[loss=3.847, NarTop10Accuracy=0.5463, over 6623.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6175, over 6047.26 frames. 
], batch size: 49, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (5/8) Epoch 17, batch 2000, train_loss[loss=3.728, NarTop10Accuracy=0.5732, over 5908.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6204, over 6021.77 frames. ], batch size: 48, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (5/8) Epoch 17, batch 2100, train_loss[loss=2.951, NarTop10Accuracy=0.7157, over 3826.00 frames. ], tot_loss[loss=3.493, NarTop10Accuracy=0.6183, over 6002.98 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (5/8) Epoch 17, batch 2200, train_loss[loss=3.273, NarTop10Accuracy=0.6685, over 7352.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6205, over 6039.70 frames. ], batch size: 31, lr: 4.99e-03 +2024-08-06 10:35:25,733 INFO [trainer.py:765] (5/8) Epoch 17, batch 2300, train_loss[loss=3.329, NarTop10Accuracy=0.6511, over 5709.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6173, over 6063.08 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (5/8) Epoch 17, batch 2400, train_loss[loss=3.917, NarTop10Accuracy=0.5334, over 5169.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6151, over 5880.46 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,105 INFO [trainer.py:765] (5/8) Epoch 17, batch 2500, train_loss[loss=3.96, NarTop10Accuracy=0.5172, over 5018.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6189, over 5552.20 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,383 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:37:32,051 INFO [trainer.py:765] (5/8) Epoch 18, batch 100, train_loss[loss=3.322, NarTop10Accuracy=0.6592, over 7300.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6331, over 2367.51 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 10:37:39,161 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,086 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 29161MB +2024-08-06 10:37:49,685 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (5/8) Epoch 18, batch 200, train_loss[loss=3.31, NarTop10Accuracy=0.6537, over 6811.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6344, over 3874.04 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,198 INFO [trainer.py:765] (5/8) Epoch 18, batch 300, train_loss[loss=3.415, NarTop10Accuracy=0.6349, over 7318.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.634, over 4679.20 frames. ], batch size: 23, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (5/8) Epoch 18, batch 400, train_loss[loss=3.459, NarTop10Accuracy=0.6151, over 5173.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.634, over 5142.59 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (5/8) Epoch 18, batch 500, train_loss[loss=3.309, NarTop10Accuracy=0.6565, over 6076.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.636, over 5420.18 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (5/8) Epoch 18, batch 600, train_loss[loss=3.475, NarTop10Accuracy=0.6247, over 5825.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6346, over 5676.02 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (5/8) Epoch 18, batch 700, train_loss[loss=3.109, NarTop10Accuracy=0.6983, over 5115.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 5744.76 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (5/8) Epoch 18, batch 800, train_loss[loss=3.627, NarTop10Accuracy=0.5898, over 5126.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6329, over 5806.69 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (5/8) Epoch 18, batch 900, train_loss[loss=3.497, NarTop10Accuracy=0.62, over 6372.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6316, over 5806.25 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,702 INFO [trainer.py:765] (5/8) Epoch 18, batch 1000, train_loss[loss=3.401, NarTop10Accuracy=0.6515, over 6320.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6302, over 5916.66 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (5/8) Epoch 18, batch 1100, train_loss[loss=3.652, NarTop10Accuracy=0.5674, over 6770.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6266, over 5943.66 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (5/8) Epoch 18, batch 1200, train_loss[loss=3.544, NarTop10Accuracy=0.6126, over 7400.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6261, over 5952.60 frames. ], batch size: 30, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (5/8) Epoch 18, batch 1300, train_loss[loss=3.116, NarTop10Accuracy=0.6974, over 5108.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6284, over 6019.59 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,237 INFO [trainer.py:765] (5/8) Epoch 18, batch 1400, train_loss[loss=3.341, NarTop10Accuracy=0.6522, over 6127.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6249, over 6044.17 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,976 INFO [trainer.py:765] (5/8) Epoch 18, batch 1500, train_loss[loss=3.833, NarTop10Accuracy=0.5467, over 6222.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6244, over 5972.04 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (5/8) Epoch 18, batch 1600, train_loss[loss=3.573, NarTop10Accuracy=0.593, over 7225.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.624, over 5946.42 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,859 INFO [trainer.py:765] (5/8) Epoch 18, batch 1700, train_loss[loss=3.713, NarTop10Accuracy=0.5732, over 6250.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6261, over 5941.00 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (5/8) Epoch 18, batch 1800, train_loss[loss=3.509, NarTop10Accuracy=0.6202, over 7092.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6245, over 6004.30 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (5/8) Epoch 18, batch 1900, train_loss[loss=3.806, NarTop10Accuracy=0.5605, over 5753.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6228, over 6042.90 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (5/8) Epoch 18, batch 2000, train_loss[loss=3.459, NarTop10Accuracy=0.63, over 6669.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6238, over 6028.18 frames. 
], batch size: 49, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (5/8) Epoch 18, batch 2100, train_loss[loss=3.195, NarTop10Accuracy=0.6773, over 3931.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6252, over 5994.80 frames. ], batch size: 4, lr: 4.72e-03 +2024-08-06 10:48:24,747 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (5/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 10:48:35,535 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,096 INFO [trainer.py:765] (5/8) Epoch 18, batch 2200, train_loss[loss=3.505, NarTop10Accuracy=0.6163, over 7327.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.625, over 6038.03 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 10:49:21,520 INFO [trainer.py:765] (5/8) Epoch 18, batch 2300, train_loss[loss=3.211, NarTop10Accuracy=0.6741, over 5870.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6239, over 6068.63 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (5/8) Epoch 18, batch 2400, train_loss[loss=3.313, NarTop10Accuracy=0.6617, over 5116.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6232, over 5872.84 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,707 INFO [trainer.py:765] (5/8) Epoch 18, batch 2500, train_loss[loss=3.34, NarTop10Accuracy=0.6479, over 4991.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6274, over 5529.86 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:30,988 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (5/8) Epoch 19, batch 100, train_loss[loss=3.195, NarTop10Accuracy=0.6713, over 7375.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6337, over 2355.61 frames. ], batch size: 30, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (5/8) Epoch 19, batch 200, train_loss[loss=3.803, NarTop10Accuracy=0.5559, over 6854.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 3860.15 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (5/8) Epoch 19, batch 300, train_loss[loss=3.468, NarTop10Accuracy=0.6219, over 7164.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6353, over 4665.20 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,829 INFO [trainer.py:765] (5/8) Epoch 19, batch 400, train_loss[loss=3.307, NarTop10Accuracy=0.6524, over 5109.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6361, over 5111.30 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (5/8) Epoch 19, batch 500, train_loss[loss=3.249, NarTop10Accuracy=0.6689, over 6055.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6356, over 5382.79 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,601 INFO [trainer.py:765] (5/8) Epoch 19, batch 600, train_loss[loss=3.23, NarTop10Accuracy=0.6602, over 5715.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6352, over 5661.25 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (5/8) Epoch 19, batch 700, train_loss[loss=3.557, NarTop10Accuracy=0.6051, over 5166.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6335, over 5730.03 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (5/8) Epoch 19, batch 800, train_loss[loss=3.448, NarTop10Accuracy=0.6499, over 5080.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6343, over 5759.85 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,238 INFO [trainer.py:765] (5/8) Epoch 19, batch 900, train_loss[loss=3.365, NarTop10Accuracy=0.6365, over 6718.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6331, over 5806.93 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (5/8) Epoch 19, batch 1000, train_loss[loss=3.285, NarTop10Accuracy=0.6568, over 6676.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6309, over 5927.70 frames. ], batch size: 14, lr: 4.52e-03 +2024-08-06 10:57:15,187 INFO [trainer.py:765] (5/8) Epoch 19, batch 1100, train_loss[loss=3.172, NarTop10Accuracy=0.6731, over 6832.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6278, over 5960.22 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (5/8) Epoch 19, batch 1200, train_loss[loss=3.528, NarTop10Accuracy=0.6116, over 7103.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6277, over 5964.56 frames. ], batch size: 30, lr: 4.51e-03 +2024-08-06 10:58:23,900 INFO [trainer.py:765] (5/8) Epoch 19, batch 1300, train_loss[loss=3.141, NarTop10Accuracy=0.6898, over 5148.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6277, over 6022.06 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (5/8) Epoch 19, batch 1400, train_loss[loss=3.206, NarTop10Accuracy=0.6813, over 6165.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6275, over 6041.60 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (5/8) Epoch 19, batch 1500, train_loss[loss=3.843, NarTop10Accuracy=0.5551, over 6276.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6265, over 5974.04 frames. ], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,831 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (5/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (5/8) Epoch 19, batch 1600, train_loss[loss=3.562, NarTop10Accuracy=0.6003, over 7079.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6273, over 5958.56 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (5/8) Epoch 19, batch 1700, train_loss[loss=3.94, NarTop10Accuracy=0.5333, over 6264.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6258, over 5948.24 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,256 INFO [trainer.py:765] (5/8) Epoch 19, batch 1800, train_loss[loss=3.393, NarTop10Accuracy=0.6401, over 7298.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6276, over 6012.94 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (5/8) Epoch 19, batch 1900, train_loss[loss=3.441, NarTop10Accuracy=0.6351, over 5209.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6246, over 6059.64 frames. 
], batch size: 49, lr: 4.48e-03 +2024-08-06 11:01:54,632 INFO [trainer.py:765] (5/8) Epoch 19, batch 2000, train_loss[loss=3.681, NarTop10Accuracy=0.5852, over 6534.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6277, over 6023.32 frames. ], batch size: 50, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (5/8) Epoch 19, batch 2100, train_loss[loss=3.337, NarTop10Accuracy=0.6382, over 3841.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6269, over 6006.52 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,694 INFO [trainer.py:765] (5/8) Epoch 19, batch 2200, train_loss[loss=3.545, NarTop10Accuracy=0.6118, over 7046.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6255, over 6035.38 frames. ], batch size: 30, lr: 4.47e-03 +2024-08-06 11:03:11,130 INFO [trainer.py:765] (5/8) Epoch 19, batch 2300, train_loss[loss=3.465, NarTop10Accuracy=0.6219, over 5739.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6258, over 6055.50 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,950 INFO [trainer.py:765] (5/8) Epoch 19, batch 2400, train_loss[loss=3.389, NarTop10Accuracy=0.6465, over 5076.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6235, over 5873.81 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 11:03:59,405 INFO [trainer.py:765] (5/8) Epoch 19, batch 2500, train_loss[loss=3.419, NarTop10Accuracy=0.6425, over 5026.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.63, over 5526.43 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:24,031 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:05:26,560 INFO [trainer.py:765] (5/8) Epoch 20, batch 100, train_loss[loss=3.438, NarTop10Accuracy=0.6183, over 7431.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6389, over 2368.26 frames. ], batch size: 31, lr: 4.33e-03 +2024-08-06 11:05:57,408 INFO [trainer.py:765] (5/8) Epoch 20, batch 200, train_loss[loss=3.308, NarTop10Accuracy=0.6661, over 6856.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6409, over 3872.11 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,633 INFO [trainer.py:765] (5/8) Epoch 20, batch 300, train_loss[loss=3.258, NarTop10Accuracy=0.6633, over 7205.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 4666.57 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,395 INFO [trainer.py:765] (5/8) Epoch 20, batch 400, train_loss[loss=3.375, NarTop10Accuracy=0.6498, over 5122.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.639, over 5130.21 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,165 INFO [trainer.py:765] (5/8) Epoch 20, batch 500, train_loss[loss=3.498, NarTop10Accuracy=0.6234, over 6203.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6386, over 5407.94 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,567 INFO [trainer.py:765] (5/8) Epoch 20, batch 600, train_loss[loss=3.103, NarTop10Accuracy=0.6948, over 5872.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6384, over 5683.12 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,273 INFO [trainer.py:765] (5/8) Epoch 20, batch 700, train_loss[loss=3.272, NarTop10Accuracy=0.6633, over 5012.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6348, over 5740.83 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,425 INFO [trainer.py:765] (5/8) Epoch 20, batch 800, train_loss[loss=3.319, NarTop10Accuracy=0.6604, over 4972.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6335, over 5802.60 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,512 INFO [trainer.py:765] (5/8) Epoch 20, batch 900, train_loss[loss=3.324, NarTop10Accuracy=0.6556, over 6310.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6322, over 5815.32 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,198 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (5/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,964 INFO [trainer.py:765] (5/8) Epoch 20, batch 1000, train_loss[loss=3.208, NarTop10Accuracy=0.6739, over 6311.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6326, over 5925.31 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,021 INFO [trainer.py:765] (5/8) Epoch 20, batch 1100, train_loss[loss=3.293, NarTop10Accuracy=0.6535, over 6882.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6292, over 5947.85 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,393 INFO [trainer.py:765] (5/8) Epoch 20, batch 1200, train_loss[loss=3.335, NarTop10Accuracy=0.646, over 7208.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6293, over 5948.63 frames. ], batch size: 30, lr: 4.28e-03 +2024-08-06 11:12:30,751 INFO [trainer.py:765] (5/8) Epoch 20, batch 1300, train_loss[loss=3.502, NarTop10Accuracy=0.6173, over 5181.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6324, over 6023.55 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,291 INFO [trainer.py:765] (5/8) Epoch 20, batch 1400, train_loss[loss=3.567, NarTop10Accuracy=0.6018, over 6085.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6297, over 6017.55 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,988 INFO [trainer.py:765] (5/8) Epoch 20, batch 1500, train_loss[loss=3.511, NarTop10Accuracy=0.6219, over 6294.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6296, over 5976.55 frames. ], batch size: 51, lr: 4.27e-03 +2024-08-06 11:14:07,050 INFO [trainer.py:765] (5/8) Epoch 20, batch 1600, train_loss[loss=3.301, NarTop10Accuracy=0.6547, over 7072.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6304, over 5957.62 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,910 INFO [trainer.py:765] (5/8) Epoch 20, batch 1700, train_loss[loss=3.712, NarTop10Accuracy=0.5678, over 6322.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6318, over 5924.36 frames. ], batch size: 13, lr: 4.26e-03 +2024-08-06 11:15:00,589 INFO [trainer.py:765] (5/8) Epoch 20, batch 1800, train_loss[loss=3.382, NarTop10Accuracy=0.6389, over 7126.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6305, over 6009.52 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,276 INFO [trainer.py:765] (5/8) Epoch 20, batch 1900, train_loss[loss=3.581, NarTop10Accuracy=0.6078, over 5513.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6276, over 6039.41 frames. ], batch size: 49, lr: 4.26e-03 +2024-08-06 11:15:56,437 INFO [trainer.py:765] (5/8) Epoch 20, batch 2000, train_loss[loss=3.491, NarTop10Accuracy=0.6215, over 6144.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6277, over 6012.19 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,957 INFO [trainer.py:765] (5/8) Epoch 20, batch 2100, train_loss[loss=3.137, NarTop10Accuracy=0.6966, over 4784.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6285, over 5996.11 frames. ], batch size: 5, lr: 4.25e-03 +2024-08-06 11:16:47,405 INFO [trainer.py:765] (5/8) Epoch 20, batch 2200, train_loss[loss=3.383, NarTop10Accuracy=0.6412, over 7566.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6265, over 6043.28 frames. ], batch size: 31, lr: 4.24e-03 +2024-08-06 11:17:12,907 INFO [trainer.py:765] (5/8) Epoch 20, batch 2300, train_loss[loss=3.694, NarTop10Accuracy=0.5861, over 5780.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6236, over 6059.41 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,714 INFO [trainer.py:765] (5/8) Epoch 20, batch 2400, train_loss[loss=3.182, NarTop10Accuracy=0.6799, over 5089.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6256, over 5848.83 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,247 INFO [trainer.py:765] (5/8) Epoch 20, batch 2500, train_loss[loss=3.272, NarTop10Accuracy=0.6631, over 5164.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6324, over 5537.34 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:21,838 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:19:21,459 INFO [trainer.py:765] (5/8) Epoch 21, batch 100, train_loss[loss=3.237, NarTop10Accuracy=0.6817, over 7531.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6381, over 2380.19 frames. ], batch size: 32, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (5/8) Epoch 21, batch 200, train_loss[loss=3.381, NarTop10Accuracy=0.6361, over 6904.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6362, over 3868.18 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (5/8) Epoch 21, batch 300, train_loss[loss=3.564, NarTop10Accuracy=0.6058, over 7275.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.641, over 4665.19 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,241 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (5/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (5/8) Epoch 21, batch 400, train_loss[loss=3.542, NarTop10Accuracy=0.5987, over 5081.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6402, over 5107.22 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (5/8) Epoch 21, batch 500, train_loss[loss=3.172, NarTop10Accuracy=0.689, over 6018.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6421, over 5394.66 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,238 INFO [trainer.py:765] (5/8) Epoch 21, batch 600, train_loss[loss=3.492, NarTop10Accuracy=0.6183, over 5800.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6415, over 5673.81 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,843 INFO [trainer.py:765] (5/8) Epoch 21, batch 700, train_loss[loss=3.196, NarTop10Accuracy=0.6811, over 5057.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6383, over 5732.39 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,076 INFO [trainer.py:765] (5/8) Epoch 21, batch 800, train_loss[loss=3.279, NarTop10Accuracy=0.6452, over 4200.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6369, over 5772.01 frames. ], batch size: 5, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (5/8) Epoch 21, batch 900, train_loss[loss=3.532, NarTop10Accuracy=0.6013, over 6213.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6371, over 5815.61 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (5/8) Epoch 21, batch 1000, train_loss[loss=3.455, NarTop10Accuracy=0.6297, over 6286.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6343, over 5914.95 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:25:16,428 INFO [trainer.py:765] (5/8) Epoch 21, batch 1100, train_loss[loss=3.43, NarTop10Accuracy=0.6321, over 6595.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6332, over 5951.02 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,741 INFO [trainer.py:765] (5/8) Epoch 21, batch 1200, train_loss[loss=3.381, NarTop10Accuracy=0.6417, over 7302.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6349, over 5941.58 frames. ], batch size: 30, lr: 4.08e-03 +2024-08-06 11:26:23,056 INFO [trainer.py:765] (5/8) Epoch 21, batch 1300, train_loss[loss=3.641, NarTop10Accuracy=0.5954, over 4993.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6336, over 5996.71 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,082 INFO [trainer.py:765] (5/8) Epoch 21, batch 1400, train_loss[loss=3.395, NarTop10Accuracy=0.6313, over 6252.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6307, over 6009.79 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,328 INFO [trainer.py:765] (5/8) Epoch 21, batch 1500, train_loss[loss=3.859, NarTop10Accuracy=0.5563, over 5859.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6303, over 5950.67 frames. ], batch size: 50, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (5/8) Epoch 21, batch 1600, train_loss[loss=3.355, NarTop10Accuracy=0.6397, over 6977.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6288, over 5930.31 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (5/8) Epoch 21, batch 1700, train_loss[loss=3.515, NarTop10Accuracy=0.6122, over 6274.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6303, over 5924.96 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (5/8) Epoch 21, batch 1800, train_loss[loss=3.601, NarTop10Accuracy=0.5967, over 7102.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6282, over 5985.43 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (5/8) Epoch 21, batch 1900, train_loss[loss=3.47, NarTop10Accuracy=0.6271, over 6131.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6279, over 6024.77 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:29:49,028 INFO [trainer.py:765] (5/8) Epoch 21, batch 2000, train_loss[loss=3.553, NarTop10Accuracy=0.606, over 5778.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6287, over 5999.06 frames. ], batch size: 49, lr: 4.05e-03 +2024-08-06 11:30:14,529 INFO [trainer.py:765] (5/8) Epoch 21, batch 2100, train_loss[loss=3.274, NarTop10Accuracy=0.6662, over 3908.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6286, over 5990.52 frames. 
], batch size: 4, lr: 4.04e-03 +2024-08-06 11:30:39,870 INFO [trainer.py:765] (5/8) Epoch 21, batch 2200, train_loss[loss=3.658, NarTop10Accuracy=0.5918, over 7257.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6278, over 6046.44 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (5/8) Epoch 21, batch 2300, train_loss[loss=3.424, NarTop10Accuracy=0.6325, over 5723.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6261, over 6072.94 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,874 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (5/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,752 INFO [trainer.py:765] (5/8) Epoch 21, batch 2400, train_loss[loss=3.292, NarTop10Accuracy=0.6633, over 5180.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6255, over 5886.43 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,056 INFO [trainer.py:765] (5/8) Epoch 21, batch 2500, train_loss[loss=3.298, NarTop10Accuracy=0.6575, over 5122.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6318, over 5543.65 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,878 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (5/8) Epoch 22, batch 100, train_loss[loss=3.612, NarTop10Accuracy=0.6013, over 7132.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6433, over 2371.39 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 11:34:05,036 INFO [trainer.py:765] (5/8) Epoch 22, batch 200, train_loss[loss=3.361, NarTop10Accuracy=0.6484, over 6814.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6463, over 3870.28 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (5/8) Epoch 22, batch 300, train_loss[loss=3.294, NarTop10Accuracy=0.664, over 7179.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6457, over 4681.73 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (5/8) Epoch 22, batch 400, train_loss[loss=3.095, NarTop10Accuracy=0.6861, over 5214.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6434, over 5136.48 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (5/8) Epoch 22, batch 500, train_loss[loss=3.371, NarTop10Accuracy=0.6302, over 5985.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6423, over 5394.92 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (5/8) Epoch 22, batch 600, train_loss[loss=3.139, NarTop10Accuracy=0.6897, over 5762.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5662.57 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (5/8) Epoch 22, batch 700, train_loss[loss=3.289, NarTop10Accuracy=0.6545, over 5145.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6395, over 5717.91 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (5/8) Epoch 22, batch 800, train_loss[loss=3.25, NarTop10Accuracy=0.6621, over 5221.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.637, over 5784.56 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (5/8) Epoch 22, batch 900, train_loss[loss=3.279, NarTop10Accuracy=0.6676, over 6299.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6369, over 5813.39 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (5/8) Epoch 22, batch 1000, train_loss[loss=3.11, NarTop10Accuracy=0.687, over 6643.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6378, over 5921.24 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (5/8) Epoch 22, batch 1100, train_loss[loss=3.382, NarTop10Accuracy=0.6385, over 6878.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6362, over 5948.86 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (5/8) Epoch 22, batch 1200, train_loss[loss=3.337, NarTop10Accuracy=0.6432, over 7273.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6361, over 5936.40 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,245 INFO [trainer.py:765] (5/8) Epoch 22, batch 1300, train_loss[loss=3.456, NarTop10Accuracy=0.6365, over 5001.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6347, over 6011.58 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,609 INFO [trainer.py:765] (5/8) Epoch 22, batch 1400, train_loss[loss=3.473, NarTop10Accuracy=0.6317, over 6077.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6339, over 6025.20 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,584 INFO [trainer.py:765] (5/8) Epoch 22, batch 1500, train_loss[loss=3.686, NarTop10Accuracy=0.5831, over 6249.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.632, over 5949.24 frames. ], batch size: 49, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (5/8) Epoch 22, batch 1600, train_loss[loss=3.416, NarTop10Accuracy=0.6417, over 7247.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6316, over 5935.20 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,463 INFO [trainer.py:765] (5/8) Epoch 22, batch 1700, train_loss[loss=3.267, NarTop10Accuracy=0.6568, over 6192.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6307, over 5926.47 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,723 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (5/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (5/8) Epoch 22, batch 1800, train_loss[loss=3.478, NarTop10Accuracy=0.623, over 7123.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6311, over 5990.46 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,751 INFO [trainer.py:765] (5/8) Epoch 22, batch 1900, train_loss[loss=3.604, NarTop10Accuracy=0.5984, over 6071.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6315, over 6034.96 frames. ], batch size: 49, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (5/8) Epoch 22, batch 2000, train_loss[loss=3.687, NarTop10Accuracy=0.5838, over 5744.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6325, over 6010.95 frames. 
], batch size: 51, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (5/8) Epoch 22, batch 2100, train_loss[loss=3.57, NarTop10Accuracy=0.607, over 4947.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6331, over 6023.35 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (5/8) Epoch 22, batch 2200, train_loss[loss=3.711, NarTop10Accuracy=0.5755, over 7147.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6313, over 6065.44 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (5/8) Epoch 22, batch 2300, train_loss[loss=3.184, NarTop10Accuracy=0.6695, over 5855.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6304, over 6084.15 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (5/8) Epoch 22, batch 2400, train_loss[loss=3.241, NarTop10Accuracy=0.6716, over 5244.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6279, over 5865.81 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (5/8) Epoch 22, batch 2500, train_loss[loss=3.296, NarTop10Accuracy=0.6574, over 5001.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6334, over 5517.97 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,616 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (5/8) Epoch 23, batch 100, train_loss[loss=3.341, NarTop10Accuracy=0.6563, over 7059.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6446, over 2357.07 frames. ], batch size: 30, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (5/8) Epoch 23, batch 200, train_loss[loss=3.549, NarTop10Accuracy=0.6088, over 6844.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6494, over 3859.58 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,922 INFO [trainer.py:765] (5/8) Epoch 23, batch 300, train_loss[loss=3.292, NarTop10Accuracy=0.6567, over 7035.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6449, over 4666.34 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (5/8) Epoch 23, batch 400, train_loss[loss=3.387, NarTop10Accuracy=0.6406, over 5046.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 5125.33 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,619 INFO [trainer.py:765] (5/8) Epoch 23, batch 500, train_loss[loss=3.521, NarTop10Accuracy=0.6141, over 6126.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6445, over 5389.12 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (5/8) Epoch 23, batch 600, train_loss[loss=3.532, NarTop10Accuracy=0.6054, over 5784.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6431, over 5666.45 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,601 INFO [trainer.py:765] (5/8) Epoch 23, batch 700, train_loss[loss=3.329, NarTop10Accuracy=0.6577, over 5005.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6407, over 5763.15 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (5/8) Epoch 23, batch 800, train_loss[loss=3.093, NarTop10Accuracy=0.6977, over 4989.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6419, over 5813.61 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,396 INFO [trainer.py:765] (5/8) Epoch 23, batch 900, train_loss[loss=3.456, NarTop10Accuracy=0.6328, over 6217.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5835.35 frames. 
], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (5/8) Epoch 23, batch 1000, train_loss[loss=3.086, NarTop10Accuracy=0.7026, over 6725.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6415, over 5947.38 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (5/8) Epoch 23, batch 1100, train_loss[loss=3.463, NarTop10Accuracy=0.6275, over 6805.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6373, over 5979.81 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (5/8) Epoch 23, batch 1200, train_loss[loss=3.356, NarTop10Accuracy=0.652, over 7155.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5974.25 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,824 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (5/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (5/8) Epoch 23, batch 1300, train_loss[loss=3.352, NarTop10Accuracy=0.6436, over 5028.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6365, over 6029.40 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (5/8) Epoch 23, batch 1400, train_loss[loss=3.511, NarTop10Accuracy=0.6225, over 6266.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6364, over 6042.16 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (5/8) Epoch 23, batch 1500, train_loss[loss=3.648, NarTop10Accuracy=0.5926, over 5779.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6336, over 5961.66 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (5/8) Epoch 23, batch 1600, train_loss[loss=3.313, NarTop10Accuracy=0.653, over 6999.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6344, over 5947.52 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,201 INFO [trainer.py:765] (5/8) Epoch 23, batch 1700, train_loss[loss=3.413, NarTop10Accuracy=0.6351, over 6177.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6307, over 5936.51 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,968 INFO [trainer.py:765] (5/8) Epoch 23, batch 1800, train_loss[loss=3.269, NarTop10Accuracy=0.6671, over 7198.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6325, over 6009.79 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,596 INFO [trainer.py:765] (5/8) Epoch 23, batch 1900, train_loss[loss=3.591, NarTop10Accuracy=0.6053, over 6024.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6316, over 6057.29 frames. ], batch size: 49, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (5/8) Epoch 23, batch 2000, train_loss[loss=3.603, NarTop10Accuracy=0.5979, over 5751.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6317, over 6024.46 frames. ], batch size: 51, lr: 3.69e-03 +2024-08-06 11:58:14,770 INFO [trainer.py:765] (5/8) Epoch 23, batch 2100, train_loss[loss=3.598, NarTop10Accuracy=0.5924, over 3986.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6311, over 5993.07 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (5/8) Epoch 23, batch 2200, train_loss[loss=3.708, NarTop10Accuracy=0.5735, over 7410.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6337, over 6028.57 frames. ], batch size: 31, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (5/8) Epoch 23, batch 2300, train_loss[loss=3.149, NarTop10Accuracy=0.6709, over 5680.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6314, over 6071.64 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (5/8) Epoch 23, batch 2400, train_loss[loss=3.278, NarTop10Accuracy=0.6667, over 5154.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6286, over 5875.44 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,010 INFO [trainer.py:765] (5/8) Epoch 23, batch 2500, train_loss[loss=3.159, NarTop10Accuracy=0.6911, over 5083.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6336, over 5538.11 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:17,856 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:01:22,111 INFO [trainer.py:765] (5/8) Epoch 24, batch 100, train_loss[loss=3.677, NarTop10Accuracy=0.5719, over 6896.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6428, over 2374.31 frames. ], batch size: 30, lr: 3.59e-03 +2024-08-06 12:01:51,342 INFO [trainer.py:765] (5/8) Epoch 24, batch 200, train_loss[loss=3.565, NarTop10Accuracy=0.6116, over 6861.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 3876.24 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,512 INFO [trainer.py:765] (5/8) Epoch 24, batch 300, train_loss[loss=3.315, NarTop10Accuracy=0.6596, over 6900.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6461, over 4667.08 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,847 INFO [trainer.py:765] (5/8) Epoch 24, batch 400, train_loss[loss=3.223, NarTop10Accuracy=0.6747, over 5181.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6453, over 5114.47 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,256 INFO [trainer.py:765] (5/8) Epoch 24, batch 500, train_loss[loss=3.096, NarTop10Accuracy=0.7114, over 6194.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6474, over 5402.30 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,172 INFO [trainer.py:765] (5/8) Epoch 24, batch 600, train_loss[loss=3.395, NarTop10Accuracy=0.6338, over 5769.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6472, over 5660.56 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,531 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (5/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:04:23,310 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (5/8) Epoch 24, batch 700, train_loss[loss=3.031, NarTop10Accuracy=0.7008, over 5030.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6447, over 5743.66 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,274 INFO [trainer.py:765] (5/8) Epoch 24, batch 800, train_loss[loss=3.538, NarTop10Accuracy=0.6189, over 4961.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6419, over 5789.98 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,754 INFO [trainer.py:765] (5/8) Epoch 24, batch 900, train_loss[loss=3.583, NarTop10Accuracy=0.5934, over 6605.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.641, over 5809.89 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:06:32,812 INFO [trainer.py:765] (5/8) Epoch 24, batch 1000, train_loss[loss=3.385, NarTop10Accuracy=0.6471, over 6647.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6386, over 5924.69 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (5/8) Epoch 24, batch 1100, train_loss[loss=3.156, NarTop10Accuracy=0.6781, over 7061.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6384, over 5972.76 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,135 INFO [trainer.py:765] (5/8) Epoch 24, batch 1200, train_loss[loss=3.419, NarTop10Accuracy=0.6355, over 7084.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6364, over 5974.43 frames. ], batch size: 30, lr: 3.56e-03 +2024-08-06 12:08:20,731 INFO [trainer.py:765] (5/8) Epoch 24, batch 1300, train_loss[loss=3.278, NarTop10Accuracy=0.6584, over 5033.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6372, over 6035.83 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,066 INFO [trainer.py:765] (5/8) Epoch 24, batch 1400, train_loss[loss=3.274, NarTop10Accuracy=0.6591, over 6187.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6344, over 6048.64 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,338 INFO [trainer.py:765] (5/8) Epoch 24, batch 1500, train_loss[loss=3.611, NarTop10Accuracy=0.6024, over 5926.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6332, over 5972.10 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,524 INFO [trainer.py:765] (5/8) Epoch 24, batch 1600, train_loss[loss=3.44, NarTop10Accuracy=0.6318, over 7146.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6331, over 5954.33 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,546 INFO [trainer.py:765] (5/8) Epoch 24, batch 1700, train_loss[loss=3.447, NarTop10Accuracy=0.6233, over 6375.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6316, over 5935.22 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,273 INFO [trainer.py:765] (5/8) Epoch 24, batch 1800, train_loss[loss=3.234, NarTop10Accuracy=0.6728, over 7281.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6328, over 6006.40 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,847 INFO [trainer.py:765] (5/8) Epoch 24, batch 1900, train_loss[loss=3.498, NarTop10Accuracy=0.6296, over 5968.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.633, over 6048.21 frames. ], batch size: 48, lr: 3.54e-03 +2024-08-06 12:11:41,666 INFO [trainer.py:765] (5/8) Epoch 24, batch 2000, train_loss[loss=3.426, NarTop10Accuracy=0.6389, over 5858.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6319, over 6018.64 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:12:07,103 INFO [trainer.py:765] (5/8) Epoch 24, batch 2100, train_loss[loss=3.171, NarTop10Accuracy=0.6651, over 3874.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.633, over 6001.52 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 12:12:33,372 INFO [trainer.py:765] (5/8) Epoch 24, batch 2200, train_loss[loss=3.58, NarTop10Accuracy=0.6031, over 7217.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6335, over 6026.57 frames. 
], batch size: 30, lr: 3.53e-03 +2024-08-06 12:12:58,772 INFO [trainer.py:765] (5/8) Epoch 24, batch 2300, train_loss[loss=3.766, NarTop10Accuracy=0.5591, over 5806.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6304, over 6052.30 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,487 INFO [trainer.py:765] (5/8) Epoch 24, batch 2400, train_loss[loss=3.377, NarTop10Accuracy=0.6447, over 5783.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6292, over 5858.29 frames. ], batch size: 8, lr: 3.53e-03 +2024-08-06 12:13:47,005 INFO [trainer.py:765] (5/8) Epoch 24, batch 2500, train_loss[loss=3.515, NarTop10Accuracy=0.6218, over 5026.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6354, over 5512.62 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:08,522 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:14:50,195 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (5/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (5/8) Epoch 25, batch 100, train_loss[loss=3.193, NarTop10Accuracy=0.6767, over 7139.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6509, over 2368.57 frames. ], batch size: 30, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (5/8) Epoch 25, batch 200, train_loss[loss=3.267, NarTop10Accuracy=0.6677, over 6850.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6493, over 3859.86 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (5/8) Epoch 25, batch 300, train_loss[loss=3.45, NarTop10Accuracy=0.6368, over 7143.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 4657.72 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (5/8) Epoch 25, batch 400, train_loss[loss=3.469, NarTop10Accuracy=0.6332, over 5127.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6462, over 5109.68 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (5/8) Epoch 25, batch 500, train_loss[loss=3.219, NarTop10Accuracy=0.6915, over 6144.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6489, over 5395.89 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,180 INFO [trainer.py:765] (5/8) Epoch 25, batch 600, train_loss[loss=3.285, NarTop10Accuracy=0.6575, over 5716.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6466, over 5667.32 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,597 INFO [trainer.py:765] (5/8) Epoch 25, batch 700, train_loss[loss=3.179, NarTop10Accuracy=0.6932, over 5095.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6442, over 5748.00 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,014 INFO [trainer.py:765] (5/8) Epoch 25, batch 800, train_loss[loss=3.182, NarTop10Accuracy=0.6765, over 4210.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 5800.20 frames. ], batch size: 5, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (5/8) Epoch 25, batch 900, train_loss[loss=3.214, NarTop10Accuracy=0.6687, over 6222.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6431, over 5836.97 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (5/8) Epoch 25, batch 1000, train_loss[loss=3.413, NarTop10Accuracy=0.629, over 6296.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6418, over 5938.50 frames. ], batch size: 13, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (5/8) Epoch 25, batch 1100, train_loss[loss=3.372, NarTop10Accuracy=0.6428, over 7078.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6373, over 5968.80 frames. ], batch size: 18, lr: 3.42e-03 +2024-08-06 12:21:40,637 INFO [trainer.py:765] (5/8) Epoch 25, batch 1200, train_loss[loss=3.466, NarTop10Accuracy=0.6205, over 7115.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6404, over 5961.83 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (5/8) Epoch 25, batch 1300, train_loss[loss=3.286, NarTop10Accuracy=0.6434, over 5043.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.64, over 6027.64 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (5/8) Epoch 25, batch 1400, train_loss[loss=3.657, NarTop10Accuracy=0.5733, over 6234.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6401, over 6042.40 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (5/8) Epoch 25, batch 1500, train_loss[loss=3.801, NarTop10Accuracy=0.555, over 6021.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6392, over 5963.01 frames. ], batch size: 49, lr: 3.41e-03 +2024-08-06 12:23:49,716 INFO [trainer.py:765] (5/8) Epoch 25, batch 1600, train_loss[loss=3.226, NarTop10Accuracy=0.6736, over 7063.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6376, over 5941.31 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (5/8) Epoch 25, batch 1700, train_loss[loss=3.835, NarTop10Accuracy=0.5589, over 6187.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6337, over 5923.43 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (5/8) Epoch 25, batch 1800, train_loss[loss=3.331, NarTop10Accuracy=0.6418, over 7108.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6336, over 5994.98 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,775 INFO [trainer.py:765] (5/8) Epoch 25, batch 1900, train_loss[loss=3.556, NarTop10Accuracy=0.6061, over 6229.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6311, over 6032.66 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (5/8) Epoch 25, batch 2000, train_loss[loss=3.666, NarTop10Accuracy=0.585, over 6251.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6345, over 6010.67 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:47,853 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (5/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:25:59,344 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (5/8) Epoch 25, batch 2100, train_loss[loss=3.011, NarTop10Accuracy=0.7031, over 3898.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6345, over 5994.14 frames. 
], batch size: 4, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (5/8) Epoch 25, batch 2200, train_loss[loss=3.53, NarTop10Accuracy=0.6132, over 7477.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6349, over 6029.28 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (5/8) Epoch 25, batch 2300, train_loss[loss=3.465, NarTop10Accuracy=0.6233, over 5824.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.634, over 6060.76 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,150 INFO [trainer.py:765] (5/8) Epoch 25, batch 2400, train_loss[loss=3.34, NarTop10Accuracy=0.6466, over 5321.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6323, over 5875.73 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (5/8) Epoch 25, batch 2500, train_loss[loss=2.998, NarTop10Accuracy=0.6999, over 5042.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6387, over 5531.50 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:12,944 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (5/8) Epoch 26, batch 100, train_loss[loss=3.44, NarTop10Accuracy=0.6284, over 7401.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6469, over 2378.03 frames. ], batch size: 31, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (5/8) Epoch 26, batch 200, train_loss[loss=3.248, NarTop10Accuracy=0.672, over 6910.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 3862.59 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (5/8) Epoch 26, batch 300, train_loss[loss=3.459, NarTop10Accuracy=0.6317, over 6981.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.646, over 4677.90 frames. ], batch size: 21, lr: 3.31e-03 +2024-08-06 12:30:52,509 INFO [trainer.py:765] (5/8) Epoch 26, batch 400, train_loss[loss=3.353, NarTop10Accuracy=0.6587, over 5154.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6464, over 5140.85 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (5/8) Epoch 26, batch 500, train_loss[loss=3.291, NarTop10Accuracy=0.6548, over 6159.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6488, over 5416.28 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (5/8) Epoch 26, batch 600, train_loss[loss=3.432, NarTop10Accuracy=0.6224, over 5774.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6464, over 5696.57 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (5/8) Epoch 26, batch 700, train_loss[loss=3.367, NarTop10Accuracy=0.6357, over 5039.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6446, over 5755.34 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (5/8) Epoch 26, batch 800, train_loss[loss=3.3, NarTop10Accuracy=0.6537, over 5023.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 5802.54 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (5/8) Epoch 26, batch 900, train_loss[loss=3.507, NarTop10Accuracy=0.6166, over 6222.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6424, over 5823.80 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (5/8) Epoch 26, batch 1000, train_loss[loss=3.299, NarTop10Accuracy=0.6584, over 6793.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6427, over 5908.43 frames. 
], batch size: 14, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (5/8) Epoch 26, batch 1100, train_loss[loss=3.342, NarTop10Accuracy=0.6476, over 6835.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.643, over 5964.82 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (5/8) Epoch 26, batch 1200, train_loss[loss=3.298, NarTop10Accuracy=0.6588, over 7136.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6424, over 5960.97 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (5/8) Epoch 26, batch 1300, train_loss[loss=3.555, NarTop10Accuracy=0.6115, over 5244.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 6015.09 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (5/8) Epoch 26, batch 1400, train_loss[loss=3.336, NarTop10Accuracy=0.6519, over 6167.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6401, over 6022.17 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,593 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (5/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,027 INFO [trainer.py:765] (5/8) Epoch 26, batch 1500, train_loss[loss=3.665, NarTop10Accuracy=0.5844, over 6136.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.64, over 5953.06 frames. ], batch size: 51, lr: 3.28e-03 +2024-08-06 12:37:51,060 INFO [trainer.py:765] (5/8) Epoch 26, batch 1600, train_loss[loss=3.323, NarTop10Accuracy=0.6556, over 7146.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6405, over 5931.23 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (5/8) Epoch 26, batch 1700, train_loss[loss=3.294, NarTop10Accuracy=0.6378, over 6268.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6382, over 5929.04 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (5/8) Epoch 26, batch 1800, train_loss[loss=3.21, NarTop10Accuracy=0.6725, over 7110.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6385, over 6001.82 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (5/8) Epoch 26, batch 1900, train_loss[loss=3.715, NarTop10Accuracy=0.5848, over 6211.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6362, over 6035.65 frames. ], batch size: 49, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (5/8) Epoch 26, batch 2000, train_loss[loss=3.438, NarTop10Accuracy=0.6356, over 6627.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6369, over 6011.56 frames. ], batch size: 50, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (5/8) Epoch 26, batch 2100, train_loss[loss=3.289, NarTop10Accuracy=0.6614, over 4852.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 6011.51 frames. ], batch size: 5, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (5/8) Epoch 26, batch 2200, train_loss[loss=3.493, NarTop10Accuracy=0.6134, over 7554.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6375, over 6047.01 frames. 
], batch size: 31, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (5/8) Epoch 26, batch 2300, train_loss[loss=3.265, NarTop10Accuracy=0.6613, over 5612.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6356, over 6058.91 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (5/8) Epoch 26, batch 2400, train_loss[loss=3.033, NarTop10Accuracy=0.7056, over 5267.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.635, over 5866.42 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,478 INFO [trainer.py:765] (5/8) Epoch 26, batch 2500, train_loss[loss=3.303, NarTop10Accuracy=0.66, over 5094.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6388, over 5538.41 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,606 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:43:12,531 INFO [trainer.py:765] (5/8) Epoch 27, batch 100, train_loss[loss=3.617, NarTop10Accuracy=0.5939, over 7227.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6475, over 2370.31 frames. ], batch size: 30, lr: 3.19e-03 +2024-08-06 12:43:43,574 INFO [trainer.py:765] (5/8) Epoch 27, batch 200, train_loss[loss=3.518, NarTop10Accuracy=0.6183, over 6837.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6501, over 3862.52 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,784 INFO [trainer.py:765] (5/8) Epoch 27, batch 300, train_loss[loss=3.329, NarTop10Accuracy=0.6604, over 7301.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6502, over 4674.04 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,459 INFO [trainer.py:765] (5/8) Epoch 27, batch 400, train_loss[loss=3.083, NarTop10Accuracy=0.7099, over 5198.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6526, over 5122.51 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,668 INFO [trainer.py:765] (5/8) Epoch 27, batch 500, train_loss[loss=3.333, NarTop10Accuracy=0.6555, over 6138.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6506, over 5393.56 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,259 INFO [trainer.py:765] (5/8) Epoch 27, batch 600, train_loss[loss=3.323, NarTop10Accuracy=0.6511, over 5670.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6469, over 5667.93 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,745 INFO [trainer.py:765] (5/8) Epoch 27, batch 700, train_loss[loss=3.479, NarTop10Accuracy=0.6169, over 5192.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6464, over 5734.83 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,014 INFO [trainer.py:765] (5/8) Epoch 27, batch 800, train_loss[loss=3.095, NarTop10Accuracy=0.6983, over 5072.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 5787.74 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,740 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (5/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:47:43,335 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,259 INFO [trainer.py:765] (5/8) Epoch 27, batch 900, train_loss[loss=3.461, NarTop10Accuracy=0.6305, over 6648.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6445, over 5839.29 frames. 
], batch size: 14, lr: 3.17e-03 +2024-08-06 12:48:22,863 INFO [trainer.py:765] (5/8) Epoch 27, batch 1000, train_loss[loss=3.206, NarTop10Accuracy=0.6878, over 6218.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 5939.69 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,084 INFO [trainer.py:765] (5/8) Epoch 27, batch 1100, train_loss[loss=3.735, NarTop10Accuracy=0.5706, over 6882.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6414, over 5973.61 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (5/8) Epoch 27, batch 1200, train_loss[loss=3.37, NarTop10Accuracy=0.6421, over 7016.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6412, over 5970.86 frames. ], batch size: 30, lr: 3.16e-03 +2024-08-06 12:50:06,242 INFO [trainer.py:765] (5/8) Epoch 27, batch 1300, train_loss[loss=2.885, NarTop10Accuracy=0.7248, over 5064.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 6038.40 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,950 INFO [trainer.py:765] (5/8) Epoch 27, batch 1400, train_loss[loss=3.253, NarTop10Accuracy=0.6647, over 6113.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6396, over 6048.55 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,278 INFO [trainer.py:765] (5/8) Epoch 27, batch 1500, train_loss[loss=3.449, NarTop10Accuracy=0.6357, over 6235.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 5987.49 frames. ], batch size: 49, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (5/8) Epoch 27, batch 1600, train_loss[loss=3.238, NarTop10Accuracy=0.6719, over 7224.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6406, over 5979.80 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,062 INFO [trainer.py:765] (5/8) Epoch 27, batch 1700, train_loss[loss=3.544, NarTop10Accuracy=0.6145, over 6666.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 5950.11 frames. ], batch size: 14, lr: 3.15e-03 +2024-08-06 12:52:32,669 INFO [trainer.py:765] (5/8) Epoch 27, batch 1800, train_loss[loss=3.506, NarTop10Accuracy=0.6269, over 7088.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6418, over 6015.03 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,291 INFO [trainer.py:765] (5/8) Epoch 27, batch 1900, train_loss[loss=3.554, NarTop10Accuracy=0.6078, over 5775.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6388, over 6041.68 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:27,998 INFO [trainer.py:765] (5/8) Epoch 27, batch 2000, train_loss[loss=3.429, NarTop10Accuracy=0.6362, over 6164.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6405, over 6005.67 frames. ], batch size: 48, lr: 3.14e-03 +2024-08-06 12:53:53,539 INFO [trainer.py:765] (5/8) Epoch 27, batch 2100, train_loss[loss=3.802, NarTop10Accuracy=0.5571, over 3842.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5987.68 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (5/8) Epoch 27, batch 2200, train_loss[loss=3.353, NarTop10Accuracy=0.6514, over 7150.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.641, over 6027.29 frames. ], batch size: 30, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (5/8) Epoch 27, batch 2300, train_loss[loss=3.25, NarTop10Accuracy=0.664, over 5855.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6373, over 6053.16 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (5/8) Epoch 27, batch 2400, train_loss[loss=3.086, NarTop10Accuracy=0.6776, over 5220.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6342, over 5852.28 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,726 INFO [trainer.py:765] (5/8) Epoch 27, batch 2500, train_loss[loss=3.125, NarTop10Accuracy=0.6913, over 5051.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6412, over 5528.09 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:53,639 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (5/8) Epoch 28, batch 100, train_loss[loss=3.212, NarTop10Accuracy=0.6775, over 7278.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6539, over 2372.56 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,204 INFO [trainer.py:765] (5/8) Epoch 28, batch 200, train_loss[loss=3.336, NarTop10Accuracy=0.6504, over 6756.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6497, over 3868.05 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (5/8) Epoch 28, batch 300, train_loss[loss=3.395, NarTop10Accuracy=0.6434, over 7166.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6492, over 4677.62 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,458 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (5/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 12:58:07,333 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (5/8) Epoch 28, batch 400, train_loss[loss=3.641, NarTop10Accuracy=0.5906, over 5188.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6472, over 5112.80 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,437 INFO [trainer.py:765] (5/8) Epoch 28, batch 500, train_loss[loss=3.304, NarTop10Accuracy=0.6798, over 6076.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6495, over 5402.59 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,488 INFO [trainer.py:765] (5/8) Epoch 28, batch 600, train_loss[loss=3.346, NarTop10Accuracy=0.6597, over 5997.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6494, over 5679.05 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (5/8) Epoch 28, batch 700, train_loss[loss=3.125, NarTop10Accuracy=0.6848, over 4999.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6466, over 5740.46 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,433 INFO [trainer.py:765] (5/8) Epoch 28, batch 800, train_loss[loss=3.263, NarTop10Accuracy=0.6683, over 5016.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6467, over 5796.55 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (5/8) Epoch 28, batch 900, train_loss[loss=3.332, NarTop10Accuracy=0.6692, over 6259.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.645, over 5808.14 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:06,494 INFO [trainer.py:765] (5/8) Epoch 28, batch 1000, train_loss[loss=3.584, NarTop10Accuracy=0.5953, over 6212.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6429, over 5902.56 frames. 
], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (5/8) Epoch 28, batch 1100, train_loss[loss=3.131, NarTop10Accuracy=0.6873, over 6960.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 5946.73 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (5/8) Epoch 28, batch 1200, train_loss[loss=3.391, NarTop10Accuracy=0.6315, over 7310.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6414, over 5940.57 frames. ], batch size: 30, lr: 3.05e-03 +2024-08-06 13:03:54,154 INFO [trainer.py:765] (5/8) Epoch 28, batch 1300, train_loss[loss=3.305, NarTop10Accuracy=0.6678, over 5028.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 6006.38 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (5/8) Epoch 28, batch 1400, train_loss[loss=3.421, NarTop10Accuracy=0.6389, over 6106.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6412, over 6022.48 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,349 INFO [trainer.py:765] (5/8) Epoch 28, batch 1500, train_loss[loss=3.398, NarTop10Accuracy=0.6361, over 6196.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5962.10 frames. ], batch size: 51, lr: 3.04e-03 +2024-08-06 13:05:30,370 INFO [trainer.py:765] (5/8) Epoch 28, batch 1600, train_loss[loss=3.682, NarTop10Accuracy=0.5826, over 7200.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6409, over 5944.95 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (5/8) Epoch 28, batch 1700, train_loss[loss=3.459, NarTop10Accuracy=0.6282, over 6577.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6425, over 5951.45 frames. ], batch size: 14, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (5/8) Epoch 28, batch 1800, train_loss[loss=3.566, NarTop10Accuracy=0.5998, over 7221.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6427, over 6005.39 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (5/8) Epoch 28, batch 1900, train_loss[loss=3.645, NarTop10Accuracy=0.5974, over 5989.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6418, over 6030.81 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (5/8) Epoch 28, batch 2000, train_loss[loss=3.421, NarTop10Accuracy=0.638, over 6149.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6437, over 6005.10 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:41,546 INFO [trainer.py:765] (5/8) Epoch 28, batch 2100, train_loss[loss=3.379, NarTop10Accuracy=0.6457, over 4851.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6401, over 5994.48 frames. ], batch size: 5, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (5/8) Epoch 28, batch 2200, train_loss[loss=3.531, NarTop10Accuracy=0.608, over 7339.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6391, over 6033.43 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (5/8) Epoch 28, batch 2300, train_loss[loss=3.381, NarTop10Accuracy=0.6472, over 5818.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6356, over 6059.95 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,134 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (5/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,389 INFO [trainer.py:765] (5/8) Epoch 28, batch 2400, train_loss[loss=3.353, NarTop10Accuracy=0.6377, over 5211.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6355, over 5870.17 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (5/8) Epoch 28, batch 2500, train_loss[loss=3.422, NarTop10Accuracy=0.6303, over 5172.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6424, over 5510.15 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:51,698 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:10:48,192 INFO [trainer.py:765] (5/8) Epoch 29, batch 100, train_loss[loss=3.655, NarTop10Accuracy=0.5778, over 7184.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6525, over 2367.61 frames. ], batch size: 30, lr: 2.96e-03 +2024-08-06 13:11:20,840 INFO [trainer.py:765] (5/8) Epoch 29, batch 200, train_loss[loss=3.297, NarTop10Accuracy=0.6572, over 7004.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6516, over 3873.83 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,948 INFO [trainer.py:765] (5/8) Epoch 29, batch 300, train_loss[loss=3.183, NarTop10Accuracy=0.6844, over 7067.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6519, over 4685.67 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,715 INFO [trainer.py:765] (5/8) Epoch 29, batch 400, train_loss[loss=3.181, NarTop10Accuracy=0.6828, over 5253.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6526, over 5144.05 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,920 INFO [trainer.py:765] (5/8) Epoch 29, batch 500, train_loss[loss=3.287, NarTop10Accuracy=0.6646, over 6149.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6506, over 5409.62 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,546 INFO [trainer.py:765] (5/8) Epoch 29, batch 600, train_loss[loss=3.56, NarTop10Accuracy=0.6013, over 5913.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5680.72 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,936 INFO [trainer.py:765] (5/8) Epoch 29, batch 700, train_loss[loss=3.669, NarTop10Accuracy=0.5976, over 5006.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6476, over 5747.35 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,676 INFO [trainer.py:765] (5/8) Epoch 29, batch 800, train_loss[loss=3.409, NarTop10Accuracy=0.6258, over 4984.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6432, over 5798.58 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,113 INFO [trainer.py:765] (5/8) Epoch 29, batch 900, train_loss[loss=3.282, NarTop10Accuracy=0.6646, over 6286.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6464, over 5813.48 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,362 INFO [trainer.py:765] (5/8) Epoch 29, batch 1000, train_loss[loss=3.52, NarTop10Accuracy=0.6112, over 6319.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6461, over 5926.73 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,712 INFO [trainer.py:765] (5/8) Epoch 29, batch 1100, train_loss[loss=3.526, NarTop10Accuracy=0.6153, over 6772.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6435, over 5960.56 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,932 INFO [trainer.py:765] (5/8) Epoch 29, batch 1200, train_loss[loss=3.541, NarTop10Accuracy=0.6133, over 7001.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6426, over 5970.95 frames. ], batch size: 30, lr: 2.94e-03 +2024-08-06 13:17:43,955 INFO [trainer.py:765] (5/8) Epoch 29, batch 1300, train_loss[loss=3.346, NarTop10Accuracy=0.6593, over 5222.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6419, over 6030.09 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,923 INFO [trainer.py:765] (5/8) Epoch 29, batch 1400, train_loss[loss=3.509, NarTop10Accuracy=0.6104, over 6063.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6408, over 6048.65 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,305 INFO [trainer.py:765] (5/8) Epoch 29, batch 1500, train_loss[loss=3.576, NarTop10Accuracy=0.5991, over 6231.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6425, over 5982.48 frames. ], batch size: 48, lr: 2.93e-03 +2024-08-06 13:19:16,408 INFO [trainer.py:765] (5/8) Epoch 29, batch 1600, train_loss[loss=3.156, NarTop10Accuracy=0.6805, over 7260.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6414, over 5953.32 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,241 INFO [trainer.py:765] (5/8) Epoch 29, batch 1700, train_loss[loss=3.195, NarTop10Accuracy=0.6875, over 6304.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6421, over 5925.81 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,089 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (5/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 13:19:59,902 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,106 INFO [trainer.py:765] (5/8) Epoch 29, batch 1800, train_loss[loss=3.175, NarTop10Accuracy=0.6705, over 7168.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6441, over 5997.31 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,843 INFO [trainer.py:765] (5/8) Epoch 29, batch 1900, train_loss[loss=3.472, NarTop10Accuracy=0.6283, over 5873.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6404, over 6047.18 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:21:12,477 INFO [trainer.py:765] (5/8) Epoch 29, batch 2000, train_loss[loss=3.563, NarTop10Accuracy=0.6086, over 6309.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6402, over 6023.17 frames. ], batch size: 48, lr: 2.92e-03 +2024-08-06 13:21:37,981 INFO [trainer.py:765] (5/8) Epoch 29, batch 2100, train_loss[loss=3.067, NarTop10Accuracy=0.6904, over 3998.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6391, over 6003.75 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,358 INFO [trainer.py:765] (5/8) Epoch 29, batch 2200, train_loss[loss=3.429, NarTop10Accuracy=0.6468, over 6943.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6376, over 6038.55 frames. ], batch size: 30, lr: 2.92e-03 +2024-08-06 13:22:28,830 INFO [trainer.py:765] (5/8) Epoch 29, batch 2300, train_loss[loss=3.43, NarTop10Accuracy=0.6367, over 5709.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6365, over 6061.86 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,619 INFO [trainer.py:765] (5/8) Epoch 29, batch 2400, train_loss[loss=3.507, NarTop10Accuracy=0.6135, over 5086.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6375, over 5867.72 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 13:23:16,978 INFO [trainer.py:765] (5/8) Epoch 29, batch 2500, train_loss[loss=3.274, NarTop10Accuracy=0.6582, over 4989.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6428, over 5520.24 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,351 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (5/8) Epoch 30, batch 100, train_loss[loss=3.288, NarTop10Accuracy=0.6586, over 7296.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.665, over 2375.38 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (5/8) Epoch 30, batch 200, train_loss[loss=3.343, NarTop10Accuracy=0.657, over 6809.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6623, over 3880.99 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (5/8) Epoch 30, batch 300, train_loss[loss=3.293, NarTop10Accuracy=0.671, over 7151.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6572, over 4698.42 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,538 INFO [trainer.py:765] (5/8) Epoch 30, batch 400, train_loss[loss=3.236, NarTop10Accuracy=0.6585, over 5279.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6529, over 5147.25 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (5/8) Epoch 30, batch 500, train_loss[loss=3.215, NarTop10Accuracy=0.6759, over 6275.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6552, over 5404.71 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (5/8) Epoch 30, batch 600, train_loss[loss=3.456, NarTop10Accuracy=0.6287, over 5675.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6534, over 5674.27 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (5/8) Epoch 30, batch 700, train_loss[loss=3.212, NarTop10Accuracy=0.6705, over 4867.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6516, over 5729.51 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (5/8) Epoch 30, batch 800, train_loss[loss=3.34, NarTop10Accuracy=0.6401, over 5184.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 5786.01 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,424 INFO [trainer.py:765] (5/8) Epoch 30, batch 900, train_loss[loss=3.684, NarTop10Accuracy=0.5847, over 6261.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6468, over 5806.67 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 13:29:45,913 INFO [trainer.py:765] (5/8) Epoch 30, batch 1000, train_loss[loss=3.567, NarTop10Accuracy=0.6057, over 6135.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6453, over 5909.01 frames. ], batch size: 13, lr: 2.84e-03 +2024-08-06 13:30:24,171 INFO [trainer.py:765] (5/8) Epoch 30, batch 1100, train_loss[loss=3.357, NarTop10Accuracy=0.6519, over 6852.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.646, over 5943.04 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,000 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (5/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (5/8) Epoch 30, batch 1200, train_loss[loss=3.157, NarTop10Accuracy=0.6791, over 7448.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6446, over 5951.07 frames. ], batch size: 32, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (5/8) Epoch 30, batch 1300, train_loss[loss=3.315, NarTop10Accuracy=0.6609, over 4975.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6445, over 6028.48 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (5/8) Epoch 30, batch 1400, train_loss[loss=3.649, NarTop10Accuracy=0.5795, over 6081.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6429, over 6039.39 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (5/8) Epoch 30, batch 1500, train_loss[loss=3.627, NarTop10Accuracy=0.5924, over 6305.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6439, over 5975.48 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (5/8) Epoch 30, batch 1600, train_loss[loss=3.584, NarTop10Accuracy=0.5972, over 7202.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.642, over 5951.82 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (5/8) Epoch 30, batch 1700, train_loss[loss=3.609, NarTop10Accuracy=0.6027, over 6666.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6416, over 5927.36 frames. ], batch size: 14, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (5/8) Epoch 30, batch 1800, train_loss[loss=3.514, NarTop10Accuracy=0.6023, over 7037.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 6008.51 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (5/8) Epoch 30, batch 1900, train_loss[loss=3.482, NarTop10Accuracy=0.6153, over 6555.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6412, over 6038.72 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (5/8) Epoch 30, batch 2000, train_loss[loss=3.819, NarTop10Accuracy=0.5517, over 6305.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6436, over 6007.75 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (5/8) Epoch 30, batch 2100, train_loss[loss=3.622, NarTop10Accuracy=0.6079, over 3917.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6402, over 5986.17 frames. ], batch size: 4, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (5/8) Epoch 30, batch 2200, train_loss[loss=3.576, NarTop10Accuracy=0.6048, over 7175.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6396, over 6017.19 frames. ], batch size: 30, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (5/8) Epoch 30, batch 2300, train_loss[loss=3.639, NarTop10Accuracy=0.5899, over 5830.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6397, over 6060.16 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,825 INFO [trainer.py:765] (5/8) Epoch 30, batch 2400, train_loss[loss=3.278, NarTop10Accuracy=0.6615, over 5185.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6385, over 5878.40 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (5/8) Epoch 30, batch 2500, train_loss[loss=3.238, NarTop10Accuracy=0.6765, over 5022.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6458, over 5549.94 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:36,037 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (5/8) Epoch 31, batch 100, train_loss[loss=3.195, NarTop10Accuracy=0.6866, over 7334.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.657, over 2365.13 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (5/8) Epoch 31, batch 200, train_loss[loss=3.108, NarTop10Accuracy=0.6983, over 6885.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6566, over 3872.10 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,677 INFO [trainer.py:765] (5/8) Epoch 31, batch 300, train_loss[loss=3.18, NarTop10Accuracy=0.6843, over 7235.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6564, over 4694.68 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (5/8) Epoch 31, batch 400, train_loss[loss=3.712, NarTop10Accuracy=0.5801, over 5201.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.653, over 5136.15 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (5/8) Epoch 31, batch 500, train_loss[loss=3.174, NarTop10Accuracy=0.6867, over 6232.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6531, over 5423.63 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,299 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (5/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,863 INFO [trainer.py:765] (5/8) Epoch 31, batch 600, train_loss[loss=3.184, NarTop10Accuracy=0.6865, over 5831.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6521, over 5700.75 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,260 INFO [trainer.py:765] (5/8) Epoch 31, batch 700, train_loss[loss=3.298, NarTop10Accuracy=0.6469, over 4198.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6516, over 5766.67 frames. ], batch size: 5, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (5/8) Epoch 31, batch 800, train_loss[loss=3.247, NarTop10Accuracy=0.6575, over 5151.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5810.86 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,275 INFO [trainer.py:765] (5/8) Epoch 31, batch 900, train_loss[loss=3.39, NarTop10Accuracy=0.6528, over 6708.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6522, over 5841.81 frames. ], batch size: 14, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (5/8) Epoch 31, batch 1000, train_loss[loss=3.243, NarTop10Accuracy=0.6717, over 6321.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6503, over 5940.15 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (5/8) Epoch 31, batch 1100, train_loss[loss=3.336, NarTop10Accuracy=0.6505, over 6814.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6489, over 5965.45 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (5/8) Epoch 31, batch 1200, train_loss[loss=3.302, NarTop10Accuracy=0.6549, over 7351.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6465, over 5944.22 frames. ], batch size: 30, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (5/8) Epoch 31, batch 1300, train_loss[loss=3.166, NarTop10Accuracy=0.6783, over 4900.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6472, over 6018.54 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,741 INFO [trainer.py:765] (5/8) Epoch 31, batch 1400, train_loss[loss=3.325, NarTop10Accuracy=0.6518, over 6163.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6433, over 6041.11 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (5/8) Epoch 31, batch 1500, train_loss[loss=3.496, NarTop10Accuracy=0.6195, over 5655.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6438, over 5976.21 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:47:04,658 INFO [trainer.py:765] (5/8) Epoch 31, batch 1600, train_loss[loss=3.33, NarTop10Accuracy=0.655, over 7194.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6424, over 5967.57 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,424 INFO [trainer.py:765] (5/8) Epoch 31, batch 1700, train_loss[loss=3.626, NarTop10Accuracy=0.5971, over 6239.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6428, over 5948.89 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,017 INFO [trainer.py:765] (5/8) Epoch 31, batch 1800, train_loss[loss=3.535, NarTop10Accuracy=0.6098, over 7166.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6434, over 6015.11 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,577 INFO [trainer.py:765] (5/8) Epoch 31, batch 1900, train_loss[loss=3.497, NarTop10Accuracy=0.6188, over 6141.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6419, over 6049.26 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:48:50,258 INFO [trainer.py:765] (5/8) Epoch 31, batch 2000, train_loss[loss=3.711, NarTop10Accuracy=0.5777, over 6140.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 6007.33 frames. ], batch size: 49, lr: 2.73e-03 +2024-08-06 13:49:15,765 INFO [trainer.py:765] (5/8) Epoch 31, batch 2100, train_loss[loss=3.599, NarTop10Accuracy=0.6071, over 3980.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.643, over 5995.23 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (5/8) Epoch 31, batch 2200, train_loss[loss=3.512, NarTop10Accuracy=0.6273, over 7500.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6423, over 6045.35 frames. ], batch size: 31, lr: 2.73e-03 +2024-08-06 13:50:06,708 INFO [trainer.py:765] (5/8) Epoch 31, batch 2300, train_loss[loss=3.217, NarTop10Accuracy=0.6853, over 5670.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6389, over 6081.79 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,392 INFO [trainer.py:765] (5/8) Epoch 31, batch 2400, train_loss[loss=3.515, NarTop10Accuracy=0.6231, over 5144.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6368, over 5893.71 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,891 INFO [trainer.py:765] (5/8) Epoch 31, batch 2500, train_loss[loss=3.263, NarTop10Accuracy=0.6618, over 4925.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5547.38 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,994 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 13:51:19,070 INFO [trainer.py:811] (5/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,547 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 13:52:19,910 INFO [trainer.py:765] (5/8) Epoch 32, batch 100, train_loss[loss=3.121, NarTop10Accuracy=0.6973, over 7420.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6573, over 2369.00 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 13:52:52,538 INFO [trainer.py:765] (5/8) Epoch 32, batch 200, train_loss[loss=3.485, NarTop10Accuracy=0.6228, over 6736.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6529, over 3860.57 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (5/8) Epoch 32, batch 300, train_loss[loss=3.287, NarTop10Accuracy=0.6571, over 7113.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6532, over 4659.08 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,887 INFO [trainer.py:765] (5/8) Epoch 32, batch 400, train_loss[loss=3.779, NarTop10Accuracy=0.5595, over 5249.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6534, over 5126.96 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,822 INFO [trainer.py:765] (5/8) Epoch 32, batch 500, train_loss[loss=3.099, NarTop10Accuracy=0.7052, over 6073.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6555, over 5413.04 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,773 INFO [trainer.py:765] (5/8) Epoch 32, batch 600, train_loss[loss=3.311, NarTop10Accuracy=0.6579, over 5818.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6573, over 5676.68 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,512 INFO [trainer.py:765] (5/8) Epoch 32, batch 700, train_loss[loss=3.174, NarTop10Accuracy=0.6881, over 5002.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6529, over 5741.57 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,173 INFO [trainer.py:765] (5/8) Epoch 32, batch 800, train_loss[loss=3.083, NarTop10Accuracy=0.7063, over 5072.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6522, over 5810.46 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,166 INFO [trainer.py:765] (5/8) Epoch 32, batch 900, train_loss[loss=3.546, NarTop10Accuracy=0.6013, over 6332.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6523, over 5831.31 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,521 INFO [trainer.py:765] (5/8) Epoch 32, batch 1000, train_loss[loss=3.703, NarTop10Accuracy=0.5808, over 6265.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6508, over 5920.50 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (5/8) Epoch 32, batch 1100, train_loss[loss=3.152, NarTop10Accuracy=0.6882, over 7045.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6467, over 5958.60 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,542 INFO [trainer.py:765] (5/8) Epoch 32, batch 1200, train_loss[loss=3.179, NarTop10Accuracy=0.69, over 7032.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6456, over 5967.04 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,260 INFO [trainer.py:765] (5/8) Epoch 32, batch 1300, train_loss[loss=3.168, NarTop10Accuracy=0.6859, over 5151.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6465, over 6026.40 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,266 INFO [trainer.py:765] (5/8) Epoch 32, batch 1400, train_loss[loss=3.296, NarTop10Accuracy=0.6471, over 6036.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6462, over 6040.69 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,976 INFO [trainer.py:765] (5/8) Epoch 32, batch 1500, train_loss[loss=3.889, NarTop10Accuracy=0.5406, over 5986.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6458, over 5975.96 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,824 INFO [trainer.py:765] (5/8) Epoch 32, batch 1600, train_loss[loss=3.332, NarTop10Accuracy=0.648, over 7207.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6444, over 5955.90 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,534 INFO [trainer.py:765] (5/8) Epoch 32, batch 1700, train_loss[loss=3.402, NarTop10Accuracy=0.645, over 6595.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6442, over 5932.48 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 14:01:34,090 INFO [trainer.py:765] (5/8) Epoch 32, batch 1800, train_loss[loss=3.232, NarTop10Accuracy=0.6616, over 7380.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6429, over 6006.01 frames. ], batch size: 23, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (5/8) Epoch 32, batch 1900, train_loss[loss=3.495, NarTop10Accuracy=0.6137, over 5672.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6422, over 6037.00 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:02:20,591 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (5/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,382 INFO [trainer.py:765] (5/8) Epoch 32, batch 2000, train_loss[loss=3.498, NarTop10Accuracy=0.6188, over 5978.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6427, over 6003.36 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 14:03:01,697 INFO [trainer.py:765] (5/8) Epoch 32, batch 2100, train_loss[loss=3.516, NarTop10Accuracy=0.6286, over 3846.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 5990.82 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 14:03:27,176 INFO [trainer.py:765] (5/8) Epoch 32, batch 2200, train_loss[loss=3.687, NarTop10Accuracy=0.5861, over 6972.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6441, over 6033.66 frames. ], batch size: 30, lr: 2.64e-03 +2024-08-06 14:03:52,585 INFO [trainer.py:765] (5/8) Epoch 32, batch 2300, train_loss[loss=3.647, NarTop10Accuracy=0.5912, over 5932.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6408, over 6061.60 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,273 INFO [trainer.py:765] (5/8) Epoch 32, batch 2400, train_loss[loss=3.349, NarTop10Accuracy=0.6448, over 5131.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6395, over 5880.44 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (5/8) Epoch 32, batch 2500, train_loss[loss=3.078, NarTop10Accuracy=0.7076, over 5013.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6467, over 5537.60 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:01,921 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 14:06:02,904 INFO [trainer.py:765] (5/8) Epoch 33, batch 100, train_loss[loss=3.271, NarTop10Accuracy=0.6563, over 7151.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6533, over 2369.02 frames. ], batch size: 30, lr: 2.60e-03 +2024-08-06 14:06:36,077 INFO [trainer.py:765] (5/8) Epoch 33, batch 200, train_loss[loss=3.465, NarTop10Accuracy=0.6288, over 6797.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6598, over 3869.04 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,144 INFO [trainer.py:765] (5/8) Epoch 33, batch 300, train_loss[loss=3.214, NarTop10Accuracy=0.6738, over 7310.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6586, over 4696.73 frames. ], batch size: 23, lr: 2.59e-03 +2024-08-06 14:07:48,254 INFO [trainer.py:765] (5/8) Epoch 33, batch 400, train_loss[loss=3.367, NarTop10Accuracy=0.6402, over 5152.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6568, over 5139.38 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,545 INFO [trainer.py:765] (5/8) Epoch 33, batch 500, train_loss[loss=3.233, NarTop10Accuracy=0.6737, over 6247.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6559, over 5424.63 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,791 INFO [trainer.py:765] (5/8) Epoch 33, batch 600, train_loss[loss=3.213, NarTop10Accuracy=0.6817, over 5911.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6538, over 5688.44 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,923 INFO [trainer.py:765] (5/8) Epoch 33, batch 700, train_loss[loss=3.351, NarTop10Accuracy=0.6403, over 5070.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6523, over 5758.18 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,595 INFO [trainer.py:765] (5/8) Epoch 33, batch 800, train_loss[loss=3.169, NarTop10Accuracy=0.6945, over 5239.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5812.53 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,385 INFO [trainer.py:765] (5/8) Epoch 33, batch 900, train_loss[loss=3.09, NarTop10Accuracy=0.7054, over 6621.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6496, over 5838.35 frames. ], batch size: 14, lr: 2.58e-03 +2024-08-06 14:11:15,067 INFO [trainer.py:765] (5/8) Epoch 33, batch 1000, train_loss[loss=3.425, NarTop10Accuracy=0.6323, over 6379.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6478, over 5926.46 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,300 INFO [trainer.py:765] (5/8) Epoch 33, batch 1100, train_loss[loss=3.632, NarTop10Accuracy=0.5935, over 6808.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6467, over 5956.87 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,926 INFO [trainer.py:765] (5/8) Epoch 33, batch 1200, train_loss[loss=3.512, NarTop10Accuracy=0.6183, over 7389.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6475, over 5961.64 frames. ], batch size: 32, lr: 2.58e-03 +2024-08-06 14:12:57,628 INFO [trainer.py:765] (5/8) Epoch 33, batch 1300, train_loss[loss=3.686, NarTop10Accuracy=0.573, over 4922.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6476, over 6019.63 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,664 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (5/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,803 INFO [trainer.py:765] (5/8) Epoch 33, batch 1400, train_loss[loss=3.457, NarTop10Accuracy=0.6303, over 6139.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6494, over 6026.76 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,244 INFO [trainer.py:765] (5/8) Epoch 33, batch 1500, train_loss[loss=3.504, NarTop10Accuracy=0.6204, over 6181.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6477, over 5974.45 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (5/8) Epoch 33, batch 1600, train_loss[loss=3.252, NarTop10Accuracy=0.6817, over 7167.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6445, over 5949.21 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,857 INFO [trainer.py:765] (5/8) Epoch 33, batch 1700, train_loss[loss=3.447, NarTop10Accuracy=0.629, over 6764.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6447, over 5938.72 frames. ], batch size: 14, lr: 2.57e-03 +2024-08-06 14:15:32,589 INFO [trainer.py:765] (5/8) Epoch 33, batch 1800, train_loss[loss=3.459, NarTop10Accuracy=0.6218, over 7336.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6473, over 6003.32 frames. ], batch size: 23, lr: 2.57e-03 +2024-08-06 14:15:59,214 INFO [trainer.py:765] (5/8) Epoch 33, batch 1900, train_loss[loss=3.361, NarTop10Accuracy=0.6488, over 6070.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6443, over 6038.63 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:16:24,894 INFO [trainer.py:765] (5/8) Epoch 33, batch 2000, train_loss[loss=3.549, NarTop10Accuracy=0.6061, over 6589.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6434, over 6024.33 frames. ], batch size: 54, lr: 2.57e-03 +2024-08-06 14:16:50,350 INFO [trainer.py:765] (5/8) Epoch 33, batch 2100, train_loss[loss=3.315, NarTop10Accuracy=0.6611, over 4707.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6451, over 5998.50 frames. ], batch size: 5, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (5/8) Epoch 33, batch 2200, train_loss[loss=3.558, NarTop10Accuracy=0.6061, over 7190.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6448, over 6051.58 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,309 INFO [trainer.py:765] (5/8) Epoch 33, batch 2300, train_loss[loss=3.193, NarTop10Accuracy=0.6697, over 5767.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.643, over 6077.09 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (5/8) Epoch 33, batch 2400, train_loss[loss=3.426, NarTop10Accuracy=0.6261, over 5195.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6432, over 5877.04 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,706 INFO [trainer.py:765] (5/8) Epoch 33, batch 2500, train_loss[loss=3.182, NarTop10Accuracy=0.6745, over 5114.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6497, over 5547.93 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,744 INFO [trainer.py:650] (5/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (5/8) Epoch 34, batch 100, train_loss[loss=3.267, NarTop10Accuracy=0.6675, over 7222.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.658, over 2361.36 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (5/8) Epoch 34, batch 200, train_loss[loss=3.299, NarTop10Accuracy=0.6651, over 6836.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6606, over 3851.45 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,841 INFO [trainer.py:765] (5/8) Epoch 34, batch 300, train_loss[loss=3.31, NarTop10Accuracy=0.6607, over 6963.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6578, over 4657.39 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,449 INFO [trainer.py:765] (5/8) Epoch 34, batch 400, train_loss[loss=3.117, NarTop10Accuracy=0.6962, over 5107.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6562, over 5113.33 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (5/8) Epoch 34, batch 500, train_loss[loss=3.201, NarTop10Accuracy=0.6811, over 6120.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6561, over 5401.88 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,826 INFO [trainer.py:765] (5/8) Epoch 34, batch 600, train_loss[loss=3.449, NarTop10Accuracy=0.6302, over 5795.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6556, over 5690.32 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (5/8) Epoch 34, batch 700, train_loss[loss=3.248, NarTop10Accuracy=0.6657, over 5002.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6543, over 5749.63 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (5/8) Epoch 34, batch 800, train_loss[loss=3.315, NarTop10Accuracy=0.6714, over 5011.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6528, over 5805.20 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,718 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (5/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (5/8) Epoch 34, batch 900, train_loss[loss=3.2, NarTop10Accuracy=0.6799, over 6669.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6525, over 5825.96 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (5/8) Epoch 34, batch 1000, train_loss[loss=3.348, NarTop10Accuracy=0.654, over 6723.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6513, over 5922.11 frames. ], batch size: 14, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (5/8) Epoch 34, batch 1100, train_loss[loss=3.527, NarTop10Accuracy=0.6149, over 6688.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6478, over 5953.37 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (5/8) Epoch 34, batch 1200, train_loss[loss=3.246, NarTop10Accuracy=0.6684, over 7111.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6492, over 5959.84 frames. 
], batch size: 30, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (5/8) Epoch 34, batch 1300, train_loss[loss=3.525, NarTop10Accuracy=0.6076, over 5099.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6485, over 6033.41 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,384 INFO [trainer.py:765] (5/8) Epoch 34, batch 1400, train_loss[loss=3.24, NarTop10Accuracy=0.6696, over 6275.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6484, over 6047.05 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (5/8) Epoch 34, batch 1500, train_loss[loss=3.734, NarTop10Accuracy=0.5743, over 5938.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6502, over 5978.71 frames. ], batch size: 49, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (5/8) Epoch 34, batch 1600, train_loss[loss=3.106, NarTop10Accuracy=0.6896, over 6966.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5959.45 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,384 INFO [trainer.py:765] (5/8) Epoch 34, batch 1700, train_loss[loss=3.359, NarTop10Accuracy=0.6355, over 6239.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6446, over 5951.74 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,010 INFO [trainer.py:765] (5/8) Epoch 34, batch 1800, train_loss[loss=3.677, NarTop10Accuracy=0.587, over 7207.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6467, over 6013.86 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (5/8) Epoch 34, batch 1900, train_loss[loss=3.494, NarTop10Accuracy=0.6205, over 6318.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.644, over 6053.56 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 14:30:09,515 INFO [trainer.py:765] (5/8) Epoch 34, batch 2000, train_loss[loss=3.552, NarTop10Accuracy=0.6146, over 6044.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6438, over 6032.90 frames. ], batch size: 50, lr: 2.49e-03 +2024-08-06 14:30:35,016 INFO [trainer.py:765] (5/8) Epoch 34, batch 2100, train_loss[loss=3.198, NarTop10Accuracy=0.6663, over 3852.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6452, over 6012.83 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (5/8) Epoch 34, batch 2200, train_loss[loss=3.595, NarTop10Accuracy=0.6035, over 7143.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 6050.85 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 14:31:25,979 INFO [trainer.py:765] (5/8) Epoch 34, batch 2300, train_loss[loss=3.339, NarTop10Accuracy=0.6526, over 5787.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6431, over 6072.28 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (5/8) Epoch 34, batch 2400, train_loss[loss=3.331, NarTop10Accuracy=0.6469, over 5263.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5887.78 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (5/8) Epoch 34, batch 2500, train_loss[loss=3.082, NarTop10Accuracy=0.7002, over 5132.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6473, over 5529.46 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,064 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 14:33:26,336 INFO [trainer.py:765] (5/8) Epoch 35, batch 100, train_loss[loss=3.311, NarTop10Accuracy=0.657, over 7276.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6601, over 2381.21 frames. 
], batch size: 30, lr: 2.44e-03 +2024-08-06 14:34:03,581 INFO [trainer.py:765] (5/8) Epoch 35, batch 200, train_loss[loss=3.364, NarTop10Accuracy=0.6588, over 6877.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6591, over 3863.32 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,186 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (5/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,664 INFO [trainer.py:765] (5/8) Epoch 35, batch 300, train_loss[loss=3.468, NarTop10Accuracy=0.6252, over 6726.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6591, over 4667.82 frames. ], batch size: 21, lr: 2.44e-03 +2024-08-06 14:35:13,542 INFO [trainer.py:765] (5/8) Epoch 35, batch 400, train_loss[loss=3.169, NarTop10Accuracy=0.6866, over 5129.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6556, over 5132.48 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,187 INFO [trainer.py:765] (5/8) Epoch 35, batch 500, train_loss[loss=3.55, NarTop10Accuracy=0.6122, over 6114.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6553, over 5413.00 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,746 INFO [trainer.py:765] (5/8) Epoch 35, batch 600, train_loss[loss=3.009, NarTop10Accuracy=0.7175, over 5983.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6555, over 5685.00 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,825 INFO [trainer.py:765] (5/8) Epoch 35, batch 700, train_loss[loss=3.461, NarTop10Accuracy=0.6287, over 5191.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6558, over 5745.59 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,768 INFO [trainer.py:765] (5/8) Epoch 35, batch 800, train_loss[loss=3.217, NarTop10Accuracy=0.6689, over 5068.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6546, over 5795.70 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,302 INFO [trainer.py:765] (5/8) Epoch 35, batch 900, train_loss[loss=3.214, NarTop10Accuracy=0.6839, over 6574.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.652, over 5815.24 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 14:38:43,707 INFO [trainer.py:765] (5/8) Epoch 35, batch 1000, train_loss[loss=3.41, NarTop10Accuracy=0.6426, over 6203.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6517, over 5937.54 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,566 INFO [trainer.py:765] (5/8) Epoch 35, batch 1100, train_loss[loss=3.446, NarTop10Accuracy=0.6316, over 6598.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6506, over 5969.19 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,836 INFO [trainer.py:765] (5/8) Epoch 35, batch 1200, train_loss[loss=3.395, NarTop10Accuracy=0.6481, over 7138.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6509, over 5965.09 frames. ], batch size: 30, lr: 2.43e-03 +2024-08-06 14:40:33,952 INFO [trainer.py:765] (5/8) Epoch 35, batch 1300, train_loss[loss=3.262, NarTop10Accuracy=0.6692, over 4935.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6501, over 6021.65 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,182 INFO [trainer.py:765] (5/8) Epoch 35, batch 1400, train_loss[loss=3.428, NarTop10Accuracy=0.6373, over 6109.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6457, over 6040.45 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,822 INFO [trainer.py:765] (5/8) Epoch 35, batch 1500, train_loss[loss=3.535, NarTop10Accuracy=0.6089, over 5955.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 5970.28 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,776 INFO [trainer.py:765] (5/8) Epoch 35, batch 1600, train_loss[loss=3.575, NarTop10Accuracy=0.5951, over 7061.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6463, over 5953.18 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,465 INFO [trainer.py:765] (5/8) Epoch 35, batch 1700, train_loss[loss=3.295, NarTop10Accuracy=0.6688, over 6193.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6472, over 5935.62 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,038 INFO [trainer.py:765] (5/8) Epoch 35, batch 1800, train_loss[loss=3.143, NarTop10Accuracy=0.6951, over 7284.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6463, over 6000.68 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,645 INFO [trainer.py:765] (5/8) Epoch 35, batch 1900, train_loss[loss=3.415, NarTop10Accuracy=0.6325, over 5749.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6457, over 6037.56 frames. ], batch size: 48, lr: 2.42e-03 +2024-08-06 14:43:47,366 INFO [trainer.py:765] (5/8) Epoch 35, batch 2000, train_loss[loss=3.507, NarTop10Accuracy=0.6224, over 6185.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6474, over 6021.15 frames. ], batch size: 50, lr: 2.42e-03 +2024-08-06 14:44:12,855 INFO [trainer.py:765] (5/8) Epoch 35, batch 2100, train_loss[loss=3.168, NarTop10Accuracy=0.7023, over 3940.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6486, over 5996.54 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,387 INFO [trainer.py:765] (5/8) Epoch 35, batch 2200, train_loss[loss=3.407, NarTop10Accuracy=0.6296, over 7204.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6487, over 6035.89 frames. ], batch size: 30, lr: 2.42e-03 +2024-08-06 14:44:47,198 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (5/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,098 INFO [trainer.py:765] (5/8) Epoch 35, batch 2300, train_loss[loss=3.279, NarTop10Accuracy=0.6602, over 5710.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6464, over 6061.54 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,818 INFO [trainer.py:765] (5/8) Epoch 35, batch 2400, train_loss[loss=3.238, NarTop10Accuracy=0.6579, over 5109.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6435, over 5874.71 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 14:46:02,145 INFO [trainer.py:765] (5/8) Epoch 35, batch 2500, train_loss[loss=3.338, NarTop10Accuracy=0.6432, over 5164.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6492, over 5524.02 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,087 INFO [trainer.py:650] (5/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (5/8) Epoch 36, batch 100, train_loss[loss=3.377, NarTop10Accuracy=0.646, over 7204.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6546, over 2360.05 frames. ], batch size: 30, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (5/8) Epoch 36, batch 200, train_loss[loss=3.283, NarTop10Accuracy=0.6635, over 6960.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6589, over 3876.31 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,723 INFO [trainer.py:765] (5/8) Epoch 36, batch 300, train_loss[loss=3.285, NarTop10Accuracy=0.6726, over 7162.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6606, over 4676.88 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (5/8) Epoch 36, batch 400, train_loss[loss=3.084, NarTop10Accuracy=0.7116, over 5208.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6603, over 5134.58 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (5/8) Epoch 36, batch 500, train_loss[loss=3.692, NarTop10Accuracy=0.5839, over 6045.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6618, over 5397.60 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (5/8) Epoch 36, batch 600, train_loss[loss=3.212, NarTop10Accuracy=0.6632, over 5778.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6579, over 5665.96 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (5/8) Epoch 36, batch 700, train_loss[loss=3.11, NarTop10Accuracy=0.7, over 5120.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6554, over 5729.46 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (5/8) Epoch 36, batch 800, train_loss[loss=3.441, NarTop10Accuracy=0.6268, over 5112.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6559, over 5804.76 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (5/8) Epoch 36, batch 900, train_loss[loss=3.112, NarTop10Accuracy=0.6882, over 6252.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6558, over 5815.25 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (5/8) Epoch 36, batch 1000, train_loss[loss=3.125, NarTop10Accuracy=0.6805, over 6200.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6544, over 5933.54 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (5/8) Epoch 36, batch 1100, train_loss[loss=3.231, NarTop10Accuracy=0.6749, over 6774.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6505, over 5968.09 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (5/8) Epoch 36, batch 1200, train_loss[loss=3.306, NarTop10Accuracy=0.66, over 7339.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6527, over 5977.88 frames. ], batch size: 30, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (5/8) Epoch 36, batch 1300, train_loss[loss=3.124, NarTop10Accuracy=0.6922, over 5076.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6522, over 6038.75 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (5/8) Epoch 36, batch 1400, train_loss[loss=3.223, NarTop10Accuracy=0.6635, over 6133.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6521, over 6059.09 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (5/8) Epoch 36, batch 1500, train_loss[loss=3.589, NarTop10Accuracy=0.5921, over 6187.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.651, over 5965.81 frames. ], batch size: 49, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (5/8) Epoch 36, batch 1600, train_loss[loss=3.283, NarTop10Accuracy=0.6608, over 7037.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6501, over 5953.40 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,132 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 14:56:14,601 INFO [trainer.py:811] (5/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,177 INFO [trainer.py:765] (5/8) Epoch 36, batch 1700, train_loss[loss=3.307, NarTop10Accuracy=0.6599, over 6225.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6506, over 5939.99 frames. ], batch size: 13, lr: 2.35e-03 +2024-08-06 14:56:53,758 INFO [trainer.py:765] (5/8) Epoch 36, batch 1800, train_loss[loss=3.413, NarTop10Accuracy=0.6377, over 7141.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6487, over 6006.51 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,335 INFO [trainer.py:765] (5/8) Epoch 36, batch 1900, train_loss[loss=3.403, NarTop10Accuracy=0.6331, over 5878.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6489, over 6046.80 frames. ], batch size: 48, lr: 2.35e-03 +2024-08-06 14:57:46,056 INFO [trainer.py:765] (5/8) Epoch 36, batch 2000, train_loss[loss=3.749, NarTop10Accuracy=0.5682, over 5927.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 6005.70 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (5/8) Epoch 36, batch 2100, train_loss[loss=2.857, NarTop10Accuracy=0.7306, over 3966.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6459, over 6001.02 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 14:58:36,831 INFO [trainer.py:765] (5/8) Epoch 36, batch 2200, train_loss[loss=3.632, NarTop10Accuracy=0.5869, over 7091.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6455, over 6030.65 frames. ], batch size: 30, lr: 2.35e-03 +2024-08-06 14:59:02,343 INFO [trainer.py:765] (5/8) Epoch 36, batch 2300, train_loss[loss=3.201, NarTop10Accuracy=0.6791, over 5782.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.644, over 6062.77 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,093 INFO [trainer.py:765] (5/8) Epoch 36, batch 2400, train_loss[loss=3.402, NarTop10Accuracy=0.6471, over 5066.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6426, over 5849.01 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,502 INFO [trainer.py:765] (5/8) Epoch 36, batch 2500, train_loss[loss=3.544, NarTop10Accuracy=0.6164, over 5122.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6465, over 5524.55 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,863 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (5/8) Epoch 37, batch 100, train_loss[loss=3.392, NarTop10Accuracy=0.6508, over 7280.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6639, over 2350.17 frames. 
], batch size: 31, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (5/8) Epoch 37, batch 200, train_loss[loss=3.08, NarTop10Accuracy=0.7053, over 6892.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6638, over 3851.63 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,382 INFO [trainer.py:765] (5/8) Epoch 37, batch 300, train_loss[loss=3.153, NarTop10Accuracy=0.6884, over 7043.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6624, over 4658.31 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,345 INFO [trainer.py:765] (5/8) Epoch 37, batch 400, train_loss[loss=3.411, NarTop10Accuracy=0.6402, over 5790.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6613, over 5104.20 frames. ], batch size: 8, lr: 2.31e-03 +2024-08-06 15:03:26,569 INFO [trainer.py:765] (5/8) Epoch 37, batch 500, train_loss[loss=3.161, NarTop10Accuracy=0.6881, over 6281.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6603, over 5393.20 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (5/8) Epoch 37, batch 600, train_loss[loss=3.028, NarTop10Accuracy=0.722, over 5950.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6596, over 5668.08 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,247 INFO [trainer.py:765] (5/8) Epoch 37, batch 700, train_loss[loss=3.345, NarTop10Accuracy=0.6542, over 5167.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.658, over 5748.38 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,162 INFO [trainer.py:765] (5/8) Epoch 37, batch 800, train_loss[loss=3.079, NarTop10Accuracy=0.7015, over 5129.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6567, over 5793.70 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,605 INFO [trainer.py:765] (5/8) Epoch 37, batch 900, train_loss[loss=3.307, NarTop10Accuracy=0.6415, over 6355.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6548, over 5818.87 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,607 INFO [trainer.py:765] (5/8) Epoch 37, batch 1000, train_loss[loss=3.187, NarTop10Accuracy=0.6791, over 6233.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6537, over 5918.78 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,489 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (5/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (5/8) Epoch 37, batch 1100, train_loss[loss=3.418, NarTop10Accuracy=0.6341, over 6822.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6526, over 5957.57 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (5/8) Epoch 37, batch 1200, train_loss[loss=3.189, NarTop10Accuracy=0.6821, over 7332.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6539, over 5966.79 frames. ], batch size: 30, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (5/8) Epoch 37, batch 1300, train_loss[loss=3.573, NarTop10Accuracy=0.6006, over 4978.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6509, over 6014.77 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (5/8) Epoch 37, batch 1400, train_loss[loss=3.191, NarTop10Accuracy=0.694, over 5965.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6488, over 6029.53 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (5/8) Epoch 37, batch 1500, train_loss[loss=3.597, NarTop10Accuracy=0.6077, over 5983.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5954.01 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (5/8) Epoch 37, batch 1600, train_loss[loss=3.466, NarTop10Accuracy=0.6235, over 7091.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5945.68 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (5/8) Epoch 37, batch 1700, train_loss[loss=3.168, NarTop10Accuracy=0.6811, over 6299.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6499, over 5935.98 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (5/8) Epoch 37, batch 1800, train_loss[loss=3.471, NarTop10Accuracy=0.6201, over 7121.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6514, over 5993.41 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (5/8) Epoch 37, batch 1900, train_loss[loss=3.464, NarTop10Accuracy=0.6283, over 6652.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6485, over 6039.49 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,940 INFO [trainer.py:765] (5/8) Epoch 37, batch 2000, train_loss[loss=3.639, NarTop10Accuracy=0.5946, over 6552.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 6019.86 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (5/8) Epoch 37, batch 2100, train_loss[loss=3.15, NarTop10Accuracy=0.692, over 4648.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6502, over 6014.46 frames. ], batch size: 5, lr: 2.29e-03 +2024-08-06 15:12:24,311 INFO [trainer.py:765] (5/8) Epoch 37, batch 2200, train_loss[loss=3.403, NarTop10Accuracy=0.6443, over 7334.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6507, over 6057.80 frames. ], batch size: 31, lr: 2.28e-03 +2024-08-06 15:12:49,786 INFO [trainer.py:765] (5/8) Epoch 37, batch 2300, train_loss[loss=3.188, NarTop10Accuracy=0.6727, over 5759.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6493, over 6073.78 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (5/8) Epoch 37, batch 2400, train_loss[loss=3.111, NarTop10Accuracy=0.7042, over 4996.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 5871.67 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (5/8) Epoch 37, batch 2500, train_loss[loss=3.533, NarTop10Accuracy=0.6043, over 5204.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6514, over 5522.64 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,263 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (5/8) Epoch 38, batch 100, train_loss[loss=3.443, NarTop10Accuracy=0.6298, over 7183.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.654, over 2377.46 frames. ], batch size: 31, lr: 2.25e-03 +2024-08-06 15:15:27,289 INFO [trainer.py:765] (5/8) Epoch 38, batch 200, train_loss[loss=3.088, NarTop10Accuracy=0.6986, over 6799.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.6612, over 3864.66 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,281 INFO [trainer.py:765] (5/8) Epoch 38, batch 300, train_loss[loss=3.063, NarTop10Accuracy=0.7107, over 7281.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6628, over 4683.77 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,595 INFO [trainer.py:765] (5/8) Epoch 38, batch 400, train_loss[loss=3.122, NarTop10Accuracy=0.6908, over 5204.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6628, over 5140.36 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,258 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (5/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 15:17:14,631 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,480 INFO [trainer.py:765] (5/8) Epoch 38, batch 500, train_loss[loss=3.41, NarTop10Accuracy=0.6295, over 6088.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6633, over 5413.21 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (5/8) Epoch 38, batch 600, train_loss[loss=3.204, NarTop10Accuracy=0.6723, over 5781.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6618, over 5681.89 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (5/8) Epoch 38, batch 700, train_loss[loss=3.291, NarTop10Accuracy=0.6628, over 5256.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6591, over 5737.69 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (5/8) Epoch 38, batch 800, train_loss[loss=3.385, NarTop10Accuracy=0.641, over 5097.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.659, over 5789.64 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,540 INFO [trainer.py:765] (5/8) Epoch 38, batch 900, train_loss[loss=3.715, NarTop10Accuracy=0.5731, over 6314.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6573, over 5811.72 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (5/8) Epoch 38, batch 1000, train_loss[loss=3.26, NarTop10Accuracy=0.6516, over 6184.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6555, over 5911.95 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (5/8) Epoch 38, batch 1100, train_loss[loss=3.507, NarTop10Accuracy=0.6091, over 6948.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6514, over 5947.52 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (5/8) Epoch 38, batch 1200, train_loss[loss=3.484, NarTop10Accuracy=0.6247, over 7577.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6493, over 5949.07 frames. ], batch size: 32, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (5/8) Epoch 38, batch 1300, train_loss[loss=3.548, NarTop10Accuracy=0.6018, over 4979.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6528, over 6023.42 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (5/8) Epoch 38, batch 1400, train_loss[loss=3.09, NarTop10Accuracy=0.6973, over 6083.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6512, over 6037.06 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (5/8) Epoch 38, batch 1500, train_loss[loss=3.338, NarTop10Accuracy=0.657, over 6082.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6479, over 5987.79 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:23:34,641 INFO [trainer.py:765] (5/8) Epoch 38, batch 1600, train_loss[loss=3.433, NarTop10Accuracy=0.6296, over 7036.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6476, over 5967.03 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (5/8) Epoch 38, batch 1700, train_loss[loss=3.373, NarTop10Accuracy=0.642, over 6260.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5958.62 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,065 INFO [trainer.py:765] (5/8) Epoch 38, batch 1800, train_loss[loss=3.367, NarTop10Accuracy=0.6489, over 7164.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 6010.76 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,672 INFO [trainer.py:765] (5/8) Epoch 38, batch 1900, train_loss[loss=3.349, NarTop10Accuracy=0.6559, over 6338.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6453, over 6040.96 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (5/8) Epoch 38, batch 2000, train_loss[loss=3.509, NarTop10Accuracy=0.6188, over 6451.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6472, over 6025.37 frames. ], batch size: 52, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (5/8) Epoch 38, batch 2100, train_loss[loss=3.477, NarTop10Accuracy=0.6307, over 4791.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6493, over 6007.96 frames. ], batch size: 5, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (5/8) Epoch 38, batch 2200, train_loss[loss=3.509, NarTop10Accuracy=0.6152, over 7443.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6489, over 6043.63 frames. ], batch size: 31, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (5/8) Epoch 38, batch 2300, train_loss[loss=3.284, NarTop10Accuracy=0.67, over 5789.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6466, over 6070.30 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (5/8) Epoch 38, batch 2400, train_loss[loss=3.021, NarTop10Accuracy=0.7166, over 5047.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 5872.99 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,145 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:27:33,590 INFO [trainer.py:811] (5/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 15:27:34,075 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,514 INFO [trainer.py:765] (5/8) Epoch 38, batch 2500, train_loss[loss=3.021, NarTop10Accuracy=0.7039, over 5106.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.652, over 5558.18 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,608 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (5/8) Epoch 39, batch 100, train_loss[loss=3.305, NarTop10Accuracy=0.6656, over 7071.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.666, over 2357.14 frames. 
], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (5/8) Epoch 39, batch 200, train_loss[loss=3.36, NarTop10Accuracy=0.6375, over 6932.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.664, over 3864.46 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (5/8) Epoch 39, batch 300, train_loss[loss=3.118, NarTop10Accuracy=0.6947, over 7102.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6625, over 4667.37 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,993 INFO [trainer.py:765] (5/8) Epoch 39, batch 400, train_loss[loss=3.156, NarTop10Accuracy=0.6915, over 5751.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6618, over 5132.14 frames. ], batch size: 8, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (5/8) Epoch 39, batch 500, train_loss[loss=3.031, NarTop10Accuracy=0.709, over 6169.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6594, over 5417.00 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (5/8) Epoch 39, batch 600, train_loss[loss=3.245, NarTop10Accuracy=0.668, over 5689.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6587, over 5686.33 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (5/8) Epoch 39, batch 700, train_loss[loss=3.286, NarTop10Accuracy=0.6822, over 5069.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6579, over 5751.76 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,165 INFO [trainer.py:765] (5/8) Epoch 39, batch 800, train_loss[loss=3.205, NarTop10Accuracy=0.6821, over 5140.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.658, over 5801.48 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (5/8) Epoch 39, batch 900, train_loss[loss=3.11, NarTop10Accuracy=0.6973, over 6207.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6596, over 5813.78 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (5/8) Epoch 39, batch 1000, train_loss[loss=3.008, NarTop10Accuracy=0.7032, over 6286.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6594, over 5918.04 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:33,094 INFO [trainer.py:765] (5/8) Epoch 39, batch 1100, train_loss[loss=3.156, NarTop10Accuracy=0.6897, over 6943.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6563, over 5940.38 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (5/8) Epoch 39, batch 1200, train_loss[loss=3.221, NarTop10Accuracy=0.672, over 7381.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6563, over 5951.71 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (5/8) Epoch 39, batch 1300, train_loss[loss=3.724, NarTop10Accuracy=0.5683, over 4185.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.654, over 6021.57 frames. ], batch size: 5, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (5/8) Epoch 39, batch 1400, train_loss[loss=3.165, NarTop10Accuracy=0.6836, over 6172.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.653, over 6047.00 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (5/8) Epoch 39, batch 1500, train_loss[loss=3.46, NarTop10Accuracy=0.6218, over 6030.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6509, over 5970.02 frames. 
], batch size: 48, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (5/8) Epoch 39, batch 1600, train_loss[loss=3.292, NarTop10Accuracy=0.6567, over 7251.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6509, over 5954.20 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (5/8) Epoch 39, batch 1700, train_loss[loss=3.181, NarTop10Accuracy=0.6792, over 6707.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6494, over 5946.30 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 15:38:08,509 INFO [trainer.py:765] (5/8) Epoch 39, batch 1800, train_loss[loss=3.31, NarTop10Accuracy=0.6562, over 7101.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6508, over 6012.83 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (5/8) Epoch 39, batch 1900, train_loss[loss=3.451, NarTop10Accuracy=0.6365, over 6629.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6049.34 frames. ], batch size: 48, lr: 2.17e-03 +2024-08-06 15:38:37,990 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (5/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,263 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,226 INFO [trainer.py:765] (5/8) Epoch 39, batch 2000, train_loss[loss=3.347, NarTop10Accuracy=0.6541, over 6312.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6508, over 6031.95 frames. ], batch size: 51, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (5/8) Epoch 39, batch 2100, train_loss[loss=3.356, NarTop10Accuracy=0.6281, over 3890.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6503, over 5992.04 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (5/8) Epoch 39, batch 2200, train_loss[loss=3.512, NarTop10Accuracy=0.6204, over 7289.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6524, over 6039.45 frames. ], batch size: 30, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (5/8) Epoch 39, batch 2300, train_loss[loss=3.517, NarTop10Accuracy=0.6306, over 5923.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6509, over 6068.26 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (5/8) Epoch 39, batch 2400, train_loss[loss=3.287, NarTop10Accuracy=0.6624, over 5180.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6483, over 5864.16 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (5/8) Epoch 39, batch 2500, train_loss[loss=3.45, NarTop10Accuracy=0.626, over 4909.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6524, over 5536.14 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:37,172 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (5/8) Epoch 40, batch 100, train_loss[loss=3.746, NarTop10Accuracy=0.5685, over 7305.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6566, over 2382.00 frames. ], batch size: 30, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (5/8) Epoch 40, batch 200, train_loss[loss=3.544, NarTop10Accuracy=0.6138, over 6757.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6621, over 3865.67 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (5/8) Epoch 40, batch 300, train_loss[loss=3.261, NarTop10Accuracy=0.663, over 6928.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6623, over 4674.30 frames. ], batch size: 21, lr: 2.13e-03 +2024-08-06 15:44:18,201 INFO [trainer.py:765] (5/8) Epoch 40, batch 400, train_loss[loss=3.097, NarTop10Accuracy=0.7004, over 5137.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6642, over 5116.31 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (5/8) Epoch 40, batch 500, train_loss[loss=3.052, NarTop10Accuracy=0.7218, over 6094.00 frames. ], tot_loss[loss=3.27, NarTop10Accuracy=0.6647, over 5396.92 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (5/8) Epoch 40, batch 600, train_loss[loss=3.488, NarTop10Accuracy=0.6208, over 5789.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6633, over 5658.93 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (5/8) Epoch 40, batch 700, train_loss[loss=3.192, NarTop10Accuracy=0.6602, over 5126.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6593, over 5730.08 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (5/8) Epoch 40, batch 800, train_loss[loss=3.376, NarTop10Accuracy=0.6478, over 5054.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6578, over 5804.18 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (5/8) Epoch 40, batch 900, train_loss[loss=3.299, NarTop10Accuracy=0.6728, over 6739.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6592, over 5827.21 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (5/8) Epoch 40, batch 1000, train_loss[loss=3.443, NarTop10Accuracy=0.6223, over 6295.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6563, over 5923.23 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,709 INFO [trainer.py:765] (5/8) Epoch 40, batch 1100, train_loss[loss=3.34, NarTop10Accuracy=0.6575, over 6841.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6546, over 5973.64 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,093 INFO [trainer.py:765] (5/8) Epoch 40, batch 1200, train_loss[loss=3.254, NarTop10Accuracy=0.6599, over 7511.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6532, over 5952.47 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 15:49:29,782 INFO [trainer.py:765] (5/8) Epoch 40, batch 1300, train_loss[loss=3.173, NarTop10Accuracy=0.6822, over 5073.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6545, over 6019.02 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,246 INFO [trainer.py:803] (5/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (5/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (5/8) Maximum memory allocated so far is 30485MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (5/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (5/8) Epoch 40, batch 1400, train_loss[loss=3.205, NarTop10Accuracy=0.6843, over 6086.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6536, over 6035.73 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (5/8) Epoch 40, batch 1500, train_loss[loss=3.502, NarTop10Accuracy=0.6094, over 6018.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6546, over 5958.08 frames. ], batch size: 50, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (5/8) Epoch 40, batch 1600, train_loss[loss=3.173, NarTop10Accuracy=0.6884, over 7189.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6558, over 5945.32 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (5/8) Epoch 40, batch 1700, train_loss[loss=3.402, NarTop10Accuracy=0.6479, over 6203.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6551, over 5933.50 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (5/8) Epoch 40, batch 1800, train_loss[loss=3.511, NarTop10Accuracy=0.6189, over 7133.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6551, over 5995.62 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (5/8) Epoch 40, batch 1900, train_loss[loss=3.472, NarTop10Accuracy=0.6154, over 6508.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6512, over 6044.37 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (5/8) Epoch 40, batch 2000, train_loss[loss=3.411, NarTop10Accuracy=0.6345, over 5426.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6523, over 6011.06 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (5/8) Epoch 40, batch 2100, train_loss[loss=2.967, NarTop10Accuracy=0.72, over 4664.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6521, over 5999.08 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (5/8) Epoch 40, batch 2200, train_loss[loss=3.277, NarTop10Accuracy=0.6652, over 7264.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6514, over 6041.07 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (5/8) Epoch 40, batch 2300, train_loss[loss=3.341, NarTop10Accuracy=0.6503, over 5867.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6502, over 6081.19 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (5/8) Epoch 40, batch 2400, train_loss[loss=3.588, NarTop10Accuracy=0.6007, over 5016.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 5886.84 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (5/8) Epoch 40, batch 2500, train_loss[loss=3.274, NarTop10Accuracy=0.6697, over 5147.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6545, over 5550.87 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,696 INFO [trainer.py:650] (5/8) Reaches end of dataloader. +2024-08-06 15:55:28,699 INFO [trainer.py:1069] (5/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-6 b/libritts/log/log-train-2024-08-06-06-41-41-6 new file mode 100644 index 0000000000000000000000000000000000000000..ba5baea0c7f3c2eea569a9d528020c8a03a719cd --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-6 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,483 INFO [trainer.py:870] (6/8) Training started +2024-08-06 06:41:41,485 INFO [trainer.py:889] (6/8) Device: cuda:6 +2024-08-06 06:41:41,485 INFO [trainer.py:890] (6/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,485 INFO [trainer.py:892] (6/8) About to create model +2024-08-06 06:41:42,244 INFO [trainer.py:899] (6/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,244 INFO [checkpoint.py:112] (6/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,338 INFO [trainer.py:914] (6/8) Using DDP +2024-08-06 06:41:46,897 INFO [datamodule.py:427] (6/8) About to get train cuts +2024-08-06 06:41:46,899 INFO [datamodule.py:434] (6/8) About to get dev cuts +2024-08-06 06:41:46,900 INFO [datamodule.py:292] (6/8) Disable SpecAugment +2024-08-06 06:41:46,900 INFO [datamodule.py:294] (6/8) About to create train dataset +2024-08-06 06:41:46,902 INFO [datamodule.py:323] (6/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,533 INFO [datamodule.py:344] (6/8) About to create train dataloader +2024-08-06 06:41:47,534 INFO [datamodule.py:367] (6/8) 
About to create dev dataset +2024-08-06 06:41:47,877 INFO [datamodule.py:388] (6/8) About to create dev dataloader +2024-08-06 06:42:36,135 INFO [trainer.py:765] (6/8) Epoch 1, batch 100, train_loss[loss=94.19, NarTop10Accuracy=0.01773, over 7212.00 frames. ], tot_loss[loss=80.63, NarTop10Accuracy=0.05157, over 2365.97 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,818 INFO [trainer.py:765] (6/8) Epoch 1, batch 200, train_loss[loss=120.8, NarTop10Accuracy=0.01869, over 6947.00 frames. ], tot_loss[loss=99.48, NarTop10Accuracy=0.0453, over 3869.91 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,851 INFO [trainer.py:765] (6/8) Epoch 1, batch 300, train_loss[loss=73.31, NarTop10Accuracy=0.02292, over 7132.00 frames. ], tot_loss[loss=86.88, NarTop10Accuracy=0.04553, over 4676.24 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,252 INFO [trainer.py:765] (6/8) Epoch 1, batch 400, train_loss[loss=33.03, NarTop10Accuracy=0.05727, over 5067.00 frames. ], tot_loss[loss=67.74, NarTop10Accuracy=0.04985, over 5110.83 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,445 INFO [trainer.py:765] (6/8) Epoch 1, batch 500, train_loss[loss=18.02, NarTop10Accuracy=0.02154, over 6089.00 frames. ], tot_loss[loss=48.46, NarTop10Accuracy=0.0569, over 5398.69 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,924 INFO [trainer.py:765] (6/8) Epoch 1, batch 600, train_loss[loss=5.988, NarTop10Accuracy=0.1798, over 5748.00 frames. ], tot_loss[loss=33.17, NarTop10Accuracy=0.06275, over 5669.42 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,480 INFO [trainer.py:765] (6/8) Epoch 1, batch 700, train_loss[loss=7.054, NarTop10Accuracy=0.08612, over 5112.00 frames. ], tot_loss[loss=23.4, NarTop10Accuracy=0.06956, over 5755.89 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,662 INFO [trainer.py:765] (6/8) Epoch 1, batch 800, train_loss[loss=6.598, NarTop10Accuracy=0.1075, over 4972.00 frames. ], tot_loss[loss=17.43, NarTop10Accuracy=0.08029, over 5815.74 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,732 INFO [trainer.py:765] (6/8) Epoch 1, batch 900, train_loss[loss=6.243, NarTop10Accuracy=0.129, over 6331.00 frames. ], tot_loss[loss=12.96, NarTop10Accuracy=0.1106, over 5845.20 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [trainer.py:765] (6/8) Epoch 1, batch 1000, train_loss[loss=5.888, NarTop10Accuracy=0.2005, over 6316.00 frames. ], tot_loss[loss=10.14, NarTop10Accuracy=0.1363, over 5940.08 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,140 INFO [trainer.py:765] (6/8) Epoch 1, batch 1100, train_loss[loss=5.535, NarTop10Accuracy=0.2038, over 6801.00 frames. ], tot_loss[loss=8.404, NarTop10Accuracy=0.1561, over 5963.70 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (6/8) Epoch 1, batch 1200, train_loss[loss=6.214, NarTop10Accuracy=0.1309, over 7053.00 frames. ], tot_loss[loss=7.299, NarTop10Accuracy=0.1744, over 5966.47 frames. ], batch size: 30, lr: 2.96e-02 +2024-08-06 06:48:47,236 INFO [trainer.py:765] (6/8) Epoch 1, batch 1300, train_loss[loss=5.547, NarTop10Accuracy=0.2166, over 5075.00 frames. ], tot_loss[loss=6.61, NarTop10Accuracy=0.1849, over 6023.56 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,568 INFO [trainer.py:765] (6/8) Epoch 1, batch 1400, train_loss[loss=5.654, NarTop10Accuracy=0.1758, over 6167.00 frames. ], tot_loss[loss=6.191, NarTop10Accuracy=0.1921, over 6029.85 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,507 INFO [trainer.py:765] (6/8) Epoch 1, batch 1500, train_loss[loss=5.692, NarTop10Accuracy=0.1827, over 6207.00 frames. ], tot_loss[loss=5.927, NarTop10Accuracy=0.1975, over 5978.83 frames. ], batch size: 48, lr: 2.94e-02 +2024-08-06 06:50:19,163 INFO [trainer.py:765] (6/8) Epoch 1, batch 1600, train_loss[loss=5.265, NarTop10Accuracy=0.2561, over 7283.00 frames. ], tot_loss[loss=5.749, NarTop10Accuracy=0.2042, over 5963.24 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,596 INFO [trainer.py:765] (6/8) Epoch 1, batch 1700, train_loss[loss=5.547, NarTop10Accuracy=0.1993, over 6701.00 frames. ], tot_loss[loss=5.636, NarTop10Accuracy=0.2092, over 5936.68 frames. ], batch size: 14, lr: 2.92e-02 +2024-08-06 06:51:11,955 INFO [trainer.py:765] (6/8) Epoch 1, batch 1800, train_loss[loss=5.466, NarTop10Accuracy=0.223, over 7147.00 frames. ], tot_loss[loss=5.548, NarTop10Accuracy=0.2162, over 5996.60 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,223 INFO [trainer.py:765] (6/8) Epoch 1, batch 1900, train_loss[loss=5.494, NarTop10Accuracy=0.2192, over 6245.00 frames. ], tot_loss[loss=5.494, NarTop10Accuracy=0.2208, over 6033.37 frames. ], batch size: 48, lr: 2.90e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:765] (6/8) Epoch 1, batch 2000, train_loss[loss=5.366, NarTop10Accuracy=0.2413, over 6249.00 frames. ], tot_loss[loss=5.441, NarTop10Accuracy=0.2283, over 6009.46 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,654 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (6/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26609MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,585 INFO [trainer.py:765] (6/8) Epoch 1, batch 2100, train_loss[loss=5.008, NarTop10Accuracy=0.2945, over 4796.00 frames. ], tot_loss[loss=5.385, NarTop10Accuracy=0.2376, over 6005.90 frames. ], batch size: 5, lr: 2.88e-02 +2024-08-06 06:53:05,355 INFO [trainer.py:765] (6/8) Epoch 1, batch 2200, train_loss[loss=5.349, NarTop10Accuracy=0.2551, over 7197.00 frames. ], tot_loss[loss=5.353, NarTop10Accuracy=0.2419, over 6031.98 frames. ], batch size: 30, lr: 2.87e-02 +2024-08-06 06:53:30,701 INFO [trainer.py:765] (6/8) Epoch 1, batch 2300, train_loss[loss=5.24, NarTop10Accuracy=0.2577, over 5785.00 frames. ], tot_loss[loss=5.334, NarTop10Accuracy=0.2454, over 6048.89 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,359 INFO [trainer.py:765] (6/8) Epoch 1, batch 2400, train_loss[loss=5.155, NarTop10Accuracy=0.2821, over 5208.00 frames. ], tot_loss[loss=5.297, NarTop10Accuracy=0.2529, over 5871.96 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (6/8) Epoch 1, batch 2500, train_loss[loss=5.116, NarTop10Accuracy=0.2922, over 5139.00 frames. ], tot_loss[loss=5.254, NarTop10Accuracy=0.2606, over 5517.11 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:39,775 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 06:55:37,936 INFO [trainer.py:765] (6/8) Epoch 2, batch 100, train_loss[loss=5.16, NarTop10Accuracy=0.2793, over 7203.00 frames. ], tot_loss[loss=5.187, NarTop10Accuracy=0.2776, over 2372.06 frames. 
], batch size: 30, lr: 2.77e-02 +2024-08-06 06:56:16,405 INFO [trainer.py:765] (6/8) Epoch 2, batch 200, train_loss[loss=4.894, NarTop10Accuracy=0.3351, over 6840.00 frames. ], tot_loss[loss=5.152, NarTop10Accuracy=0.284, over 3853.99 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,972 INFO [trainer.py:765] (6/8) Epoch 2, batch 300, train_loss[loss=5.199, NarTop10Accuracy=0.2862, over 7343.00 frames. ], tot_loss[loss=5.15, NarTop10Accuracy=0.2846, over 4670.70 frames. ], batch size: 23, lr: 2.75e-02 +2024-08-06 06:57:13,938 INFO [trainer.py:765] (6/8) Epoch 2, batch 400, train_loss[loss=5.502, NarTop10Accuracy=0.2174, over 5108.00 frames. ], tot_loss[loss=5.138, NarTop10Accuracy=0.287, over 5112.65 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (6/8) Epoch 2, batch 500, train_loss[loss=4.746, NarTop10Accuracy=0.3693, over 6206.00 frames. ], tot_loss[loss=5.106, NarTop10Accuracy=0.2926, over 5396.62 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,425 INFO [trainer.py:765] (6/8) Epoch 2, batch 600, train_loss[loss=4.767, NarTop10Accuracy=0.3518, over 5689.00 frames. ], tot_loss[loss=5.091, NarTop10Accuracy=0.2959, over 5657.40 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,282 INFO [trainer.py:765] (6/8) Epoch 2, batch 700, train_loss[loss=4.98, NarTop10Accuracy=0.3219, over 4932.00 frames. ], tot_loss[loss=5.087, NarTop10Accuracy=0.2968, over 5750.20 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,889 INFO [trainer.py:765] (6/8) Epoch 2, batch 800, train_loss[loss=4.903, NarTop10Accuracy=0.3357, over 5050.00 frames. ], tot_loss[loss=5.087, NarTop10Accuracy=0.2967, over 5801.05 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,183 INFO [trainer.py:765] (6/8) Epoch 2, batch 900, train_loss[loss=5.461, NarTop10Accuracy=0.2165, over 6274.00 frames. ], tot_loss[loss=5.056, NarTop10Accuracy=0.3036, over 5831.54 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,142 INFO [trainer.py:765] (6/8) Epoch 2, batch 1000, train_loss[loss=4.746, NarTop10Accuracy=0.362, over 6237.00 frames. ], tot_loss[loss=5.024, NarTop10Accuracy=0.3099, over 5924.25 frames. ], batch size: 13, lr: 2.66e-02 +2024-08-06 07:01:05,573 INFO [trainer.py:765] (6/8) Epoch 2, batch 1100, train_loss[loss=4.823, NarTop10Accuracy=0.3403, over 6956.00 frames. ], tot_loss[loss=5.012, NarTop10Accuracy=0.3114, over 5944.44 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (6/8) Epoch 2, batch 1200, train_loss[loss=4.914, NarTop10Accuracy=0.3355, over 7214.00 frames. ], tot_loss[loss=5.001, NarTop10Accuracy=0.3134, over 5943.09 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,644 INFO [trainer.py:765] (6/8) Epoch 2, batch 1300, train_loss[loss=5.383, NarTop10Accuracy=0.2205, over 4870.00 frames. ], tot_loss[loss=4.957, NarTop10Accuracy=0.3213, over 6018.67 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,252 INFO [trainer.py:765] (6/8) Epoch 2, batch 1400, train_loss[loss=4.668, NarTop10Accuracy=0.3753, over 6119.00 frames. ], tot_loss[loss=4.944, NarTop10Accuracy=0.3239, over 6051.14 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,267 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (6/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26979MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (6/8) Epoch 2, batch 1500, train_loss[loss=5.018, NarTop10Accuracy=0.3185, over 5964.00 frames. ], tot_loss[loss=4.928, NarTop10Accuracy=0.3268, over 5982.76 frames. ], batch size: 49, lr: 2.60e-02 +2024-08-06 07:03:53,554 INFO [trainer.py:765] (6/8) Epoch 2, batch 1600, train_loss[loss=4.804, NarTop10Accuracy=0.3494, over 7347.00 frames. ], tot_loss[loss=4.915, NarTop10Accuracy=0.3297, over 5965.25 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (6/8) Epoch 2, batch 1700, train_loss[loss=4.884, NarTop10Accuracy=0.3347, over 6641.00 frames. ], tot_loss[loss=4.912, NarTop10Accuracy=0.3314, over 5935.29 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (6/8) Epoch 2, batch 1800, train_loss[loss=4.741, NarTop10Accuracy=0.3638, over 6918.00 frames. ], tot_loss[loss=4.894, NarTop10Accuracy=0.3345, over 6002.99 frames. ], batch size: 21, lr: 2.56e-02 +2024-08-06 07:05:13,587 INFO [trainer.py:765] (6/8) Epoch 2, batch 1900, train_loss[loss=4.744, NarTop10Accuracy=0.3574, over 5423.00 frames. ], tot_loss[loss=4.872, NarTop10Accuracy=0.3391, over 6049.88 frames. ], batch size: 49, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (6/8) Epoch 2, batch 2000, train_loss[loss=4.769, NarTop10Accuracy=0.3646, over 6570.00 frames. ], tot_loss[loss=4.844, NarTop10Accuracy=0.3439, over 6020.34 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (6/8) Epoch 2, batch 2100, train_loss[loss=4.421, NarTop10Accuracy=0.4355, over 3931.00 frames. ], tot_loss[loss=4.847, NarTop10Accuracy=0.3433, over 5996.69 frames. ], batch size: 4, lr: 2.52e-02 +2024-08-06 07:06:30,372 INFO [trainer.py:765] (6/8) Epoch 2, batch 2200, train_loss[loss=4.452, NarTop10Accuracy=0.4176, over 7142.00 frames. ], tot_loss[loss=4.804, NarTop10Accuracy=0.3523, over 6028.90 frames. ], batch size: 30, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (6/8) Epoch 2, batch 2300, train_loss[loss=4.499, NarTop10Accuracy=0.4112, over 5746.00 frames. ], tot_loss[loss=4.802, NarTop10Accuracy=0.3535, over 6063.00 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (6/8) Epoch 2, batch 2400, train_loss[loss=4.712, NarTop10Accuracy=0.369, over 5147.00 frames. ], tot_loss[loss=4.774, NarTop10Accuracy=0.3593, over 5868.58 frames. ], batch size: 7, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (6/8) Epoch 2, batch 2500, train_loss[loss=4.632, NarTop10Accuracy=0.3846, over 5092.00 frames. ], tot_loss[loss=4.747, NarTop10Accuracy=0.3643, over 5525.76 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:07,902 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 07:09:08,537 INFO [trainer.py:765] (6/8) Epoch 3, batch 100, train_loss[loss=4.894, NarTop10Accuracy=0.3372, over 7142.00 frames. ], tot_loss[loss=4.651, NarTop10Accuracy=0.3842, over 2370.51 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,498 INFO [trainer.py:765] (6/8) Epoch 3, batch 200, train_loss[loss=4.561, NarTop10Accuracy=0.4092, over 6965.00 frames. ], tot_loss[loss=4.627, NarTop10Accuracy=0.3902, over 3859.80 frames. 
], batch size: 17, lr: 2.34e-02 +2024-08-06 07:10:16,975 INFO [trainer.py:765] (6/8) Epoch 3, batch 300, train_loss[loss=4.52, NarTop10Accuracy=0.3979, over 7120.00 frames. ], tot_loss[loss=4.598, NarTop10Accuracy=0.395, over 4663.98 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,791 INFO [trainer.py:765] (6/8) Epoch 3, batch 400, train_loss[loss=4.326, NarTop10Accuracy=0.452, over 5210.00 frames. ], tot_loss[loss=4.578, NarTop10Accuracy=0.3987, over 5119.06 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (6/8) Epoch 3, batch 500, train_loss[loss=4.802, NarTop10Accuracy=0.3498, over 6095.00 frames. ], tot_loss[loss=4.573, NarTop10Accuracy=0.3994, over 5410.74 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (6/8) Epoch 3, batch 600, train_loss[loss=4.715, NarTop10Accuracy=0.3755, over 5774.00 frames. ], tot_loss[loss=4.557, NarTop10Accuracy=0.4029, over 5690.48 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (6/8) Epoch 3, batch 700, train_loss[loss=4.686, NarTop10Accuracy=0.3868, over 5052.00 frames. ], tot_loss[loss=4.547, NarTop10Accuracy=0.4047, over 5739.46 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (6/8) Epoch 3, batch 800, train_loss[loss=4.47, NarTop10Accuracy=0.4177, over 5054.00 frames. ], tot_loss[loss=4.532, NarTop10Accuracy=0.4072, over 5788.03 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (6/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 26979MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (6/8) Epoch 3, batch 900, train_loss[loss=4.412, NarTop10Accuracy=0.4278, over 6248.00 frames. ], tot_loss[loss=4.508, NarTop10Accuracy=0.4117, over 5803.10 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (6/8) Epoch 3, batch 1000, train_loss[loss=4.206, NarTop10Accuracy=0.4668, over 6263.00 frames. ], tot_loss[loss=4.5, NarTop10Accuracy=0.413, over 5909.57 frames. ], batch size: 13, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (6/8) Epoch 3, batch 1100, train_loss[loss=4.558, NarTop10Accuracy=0.4029, over 6838.00 frames. ], tot_loss[loss=4.491, NarTop10Accuracy=0.4151, over 5937.17 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,867 INFO [trainer.py:765] (6/8) Epoch 3, batch 1200, train_loss[loss=4.373, NarTop10Accuracy=0.4246, over 7472.00 frames. ], tot_loss[loss=4.478, NarTop10Accuracy=0.4174, over 5929.97 frames. ], batch size: 30, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (6/8) Epoch 3, batch 1300, train_loss[loss=4.529, NarTop10Accuracy=0.4166, over 4946.00 frames. ], tot_loss[loss=4.467, NarTop10Accuracy=0.4194, over 6013.64 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (6/8) Epoch 3, batch 1400, train_loss[loss=4.183, NarTop10Accuracy=0.4682, over 6060.00 frames. ], tot_loss[loss=4.45, NarTop10Accuracy=0.4224, over 6038.05 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,664 INFO [trainer.py:765] (6/8) Epoch 3, batch 1500, train_loss[loss=4.696, NarTop10Accuracy=0.3716, over 5624.00 frames. ], tot_loss[loss=4.446, NarTop10Accuracy=0.4231, over 5972.22 frames. ], batch size: 49, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (6/8) Epoch 3, batch 1600, train_loss[loss=4.244, NarTop10Accuracy=0.4704, over 7075.00 frames. ], tot_loss[loss=4.424, NarTop10Accuracy=0.4274, over 5954.07 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,503 INFO [trainer.py:765] (6/8) Epoch 3, batch 1700, train_loss[loss=4.387, NarTop10Accuracy=0.4393, over 6684.00 frames. ], tot_loss[loss=4.404, NarTop10Accuracy=0.4308, over 5929.05 frames. ], batch size: 14, lr: 2.18e-02 +2024-08-06 07:18:32,161 INFO [trainer.py:765] (6/8) Epoch 3, batch 1800, train_loss[loss=4.539, NarTop10Accuracy=0.4026, over 7197.00 frames. ], tot_loss[loss=4.391, NarTop10Accuracy=0.4336, over 6000.83 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,959 INFO [trainer.py:765] (6/8) Epoch 3, batch 1900, train_loss[loss=4.67, NarTop10Accuracy=0.3909, over 6187.00 frames. ], tot_loss[loss=4.374, NarTop10Accuracy=0.4373, over 6044.27 frames. ], batch size: 52, lr: 2.16e-02 +2024-08-06 07:19:27,622 INFO [trainer.py:765] (6/8) Epoch 3, batch 2000, train_loss[loss=4.422, NarTop10Accuracy=0.4353, over 5679.00 frames. ], tot_loss[loss=4.358, NarTop10Accuracy=0.4403, over 6015.97 frames. ], batch size: 48, lr: 2.15e-02 +2024-08-06 07:19:53,071 INFO [trainer.py:765] (6/8) Epoch 3, batch 2100, train_loss[loss=4.045, NarTop10Accuracy=0.502, over 3946.00 frames. ], tot_loss[loss=4.327, NarTop10Accuracy=0.4466, over 5991.67 frames. ], batch size: 4, lr: 2.14e-02 +2024-08-06 07:20:18,554 INFO [trainer.py:765] (6/8) Epoch 3, batch 2200, train_loss[loss=4.644, NarTop10Accuracy=0.3852, over 7119.00 frames. ], tot_loss[loss=4.32, NarTop10Accuracy=0.4479, over 6049.69 frames. ], batch size: 30, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (6/8) Epoch 3, batch 2300, train_loss[loss=4.067, NarTop10Accuracy=0.4983, over 5756.00 frames. ], tot_loss[loss=4.322, NarTop10Accuracy=0.4476, over 6065.66 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,678 INFO [trainer.py:765] (6/8) Epoch 3, batch 2400, train_loss[loss=4.038, NarTop10Accuracy=0.4993, over 5224.00 frames. ], tot_loss[loss=4.311, NarTop10Accuracy=0.4496, over 5866.12 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (6/8) Epoch 3, batch 2500, train_loss[loss=4.251, NarTop10Accuracy=0.4501, over 5014.00 frames. ], tot_loss[loss=4.259, NarTop10Accuracy=0.4599, over 5531.38 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,585 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (6/8) Epoch 4, batch 100, train_loss[loss=4.156, NarTop10Accuracy=0.4878, over 7061.00 frames. ], tot_loss[loss=4.184, NarTop10Accuracy=0.4772, over 2375.17 frames. ], batch size: 30, lr: 1.97e-02 +2024-08-06 07:23:33,304 INFO [trainer.py:765] (6/8) Epoch 4, batch 200, train_loss[loss=4.06, NarTop10Accuracy=0.5046, over 6850.00 frames. ], tot_loss[loss=4.183, NarTop10Accuracy=0.4767, over 3875.41 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,466 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:24:01,516 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,362 INFO [trainer.py:765] (6/8) Epoch 4, batch 300, train_loss[loss=3.815, NarTop10Accuracy=0.5499, over 7173.00 frames. ], tot_loss[loss=4.167, NarTop10Accuracy=0.4793, over 4685.01 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (6/8) Epoch 4, batch 400, train_loss[loss=4.28, NarTop10Accuracy=0.4623, over 5302.00 frames. ], tot_loss[loss=4.18, NarTop10Accuracy=0.4767, over 5128.38 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,295 INFO [trainer.py:765] (6/8) Epoch 4, batch 500, train_loss[loss=4.289, NarTop10Accuracy=0.4585, over 6351.00 frames. ], tot_loss[loss=4.154, NarTop10Accuracy=0.4817, over 5408.34 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (6/8) Epoch 4, batch 600, train_loss[loss=4.267, NarTop10Accuracy=0.4624, over 5857.00 frames. ], tot_loss[loss=4.145, NarTop10Accuracy=0.4839, over 5689.40 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,607 INFO [trainer.py:765] (6/8) Epoch 4, batch 700, train_loss[loss=4.089, NarTop10Accuracy=0.4794, over 5067.00 frames. ], tot_loss[loss=4.154, NarTop10Accuracy=0.4822, over 5761.14 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,434 INFO [trainer.py:765] (6/8) Epoch 4, batch 800, train_loss[loss=4.066, NarTop10Accuracy=0.5015, over 4929.00 frames. ], tot_loss[loss=4.148, NarTop10Accuracy=0.4829, over 5814.48 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,043 INFO [trainer.py:765] (6/8) Epoch 4, batch 900, train_loss[loss=4.243, NarTop10Accuracy=0.4666, over 6729.00 frames. ], tot_loss[loss=4.118, NarTop10Accuracy=0.4889, over 5813.75 frames. ], batch size: 14, lr: 1.90e-02 +2024-08-06 07:28:20,671 INFO [trainer.py:765] (6/8) Epoch 4, batch 1000, train_loss[loss=3.87, NarTop10Accuracy=0.5458, over 6286.00 frames. ], tot_loss[loss=4.112, NarTop10Accuracy=0.4902, over 5921.12 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,072 INFO [trainer.py:765] (6/8) Epoch 4, batch 1100, train_loss[loss=3.866, NarTop10Accuracy=0.5511, over 6779.00 frames. ], tot_loss[loss=4.117, NarTop10Accuracy=0.4892, over 5952.22 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,599 INFO [trainer.py:765] (6/8) Epoch 4, batch 1200, train_loss[loss=4.204, NarTop10Accuracy=0.4748, over 7308.00 frames. ], tot_loss[loss=4.099, NarTop10Accuracy=0.4927, over 5958.55 frames. ], batch size: 31, lr: 1.87e-02 +2024-08-06 07:30:04,992 INFO [trainer.py:765] (6/8) Epoch 4, batch 1300, train_loss[loss=3.899, NarTop10Accuracy=0.5272, over 4958.00 frames. ], tot_loss[loss=4.067, NarTop10Accuracy=0.4988, over 6026.35 frames. ], batch size: 6, lr: 1.87e-02 +2024-08-06 07:30:43,380 INFO [trainer.py:765] (6/8) Epoch 4, batch 1400, train_loss[loss=4.007, NarTop10Accuracy=0.5112, over 6129.00 frames. ], tot_loss[loss=4.073, NarTop10Accuracy=0.4981, over 6037.72 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,832 INFO [trainer.py:765] (6/8) Epoch 4, batch 1500, train_loss[loss=4.04, NarTop10Accuracy=0.5077, over 6024.00 frames. ], tot_loss[loss=4.07, NarTop10Accuracy=0.4989, over 5973.90 frames. 
], batch size: 48, lr: 1.85e-02 +2024-08-06 07:31:39,961 INFO [trainer.py:765] (6/8) Epoch 4, batch 1600, train_loss[loss=4.139, NarTop10Accuracy=0.4789, over 7079.00 frames. ], tot_loss[loss=4.072, NarTop10Accuracy=0.4981, over 5954.79 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (6/8) Epoch 4, batch 1700, train_loss[loss=4.319, NarTop10Accuracy=0.4491, over 6244.00 frames. ], tot_loss[loss=4.052, NarTop10Accuracy=0.5024, over 5927.48 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (6/8) Epoch 4, batch 1800, train_loss[loss=4.327, NarTop10Accuracy=0.4502, over 7008.00 frames. ], tot_loss[loss=4.043, NarTop10Accuracy=0.5042, over 5994.11 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,194 INFO [trainer.py:765] (6/8) Epoch 4, batch 1900, train_loss[loss=4.322, NarTop10Accuracy=0.4506, over 6107.00 frames. ], tot_loss[loss=4.06, NarTop10Accuracy=0.5008, over 6038.56 frames. ], batch size: 48, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (6/8) Epoch 4, batch 2000, train_loss[loss=4.396, NarTop10Accuracy=0.4381, over 5803.00 frames. ], tot_loss[loss=4.046, NarTop10Accuracy=0.5039, over 6021.34 frames. ], batch size: 49, lr: 1.81e-02 +2024-08-06 07:33:51,513 INFO [trainer.py:765] (6/8) Epoch 4, batch 2100, train_loss[loss=3.852, NarTop10Accuracy=0.5624, over 4131.00 frames. ], tot_loss[loss=4.034, NarTop10Accuracy=0.5065, over 6001.86 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (6/8) Epoch 4, batch 2200, train_loss[loss=4.212, NarTop10Accuracy=0.4702, over 7069.00 frames. ], tot_loss[loss=4.033, NarTop10Accuracy=0.5063, over 6043.41 frames. ], batch size: 30, lr: 1.80e-02 +2024-08-06 07:34:31,431 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (6/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,442 INFO [trainer.py:765] (6/8) Epoch 4, batch 2300, train_loss[loss=3.565, NarTop10Accuracy=0.5838, over 5766.00 frames. ], tot_loss[loss=4.025, NarTop10Accuracy=0.5081, over 6068.75 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,166 INFO [trainer.py:765] (6/8) Epoch 4, batch 2400, train_loss[loss=3.952, NarTop10Accuracy=0.5339, over 5231.00 frames. ], tot_loss[loss=4.017, NarTop10Accuracy=0.5095, over 5889.08 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,622 INFO [trainer.py:765] (6/8) Epoch 4, batch 2500, train_loss[loss=3.966, NarTop10Accuracy=0.522, over 5032.00 frames. ], tot_loss[loss=4, NarTop10Accuracy=0.5127, over 5545.51 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,717 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (6/8) Epoch 5, batch 100, train_loss[loss=3.967, NarTop10Accuracy=0.5225, over 7612.00 frames. ], tot_loss[loss=3.966, NarTop10Accuracy=0.522, over 2377.53 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 07:37:39,815 INFO [trainer.py:765] (6/8) Epoch 5, batch 200, train_loss[loss=3.995, NarTop10Accuracy=0.5176, over 6768.00 frames. ], tot_loss[loss=3.94, NarTop10Accuracy=0.5267, over 3876.33 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,472 INFO [trainer.py:765] (6/8) Epoch 5, batch 300, train_loss[loss=4.305, NarTop10Accuracy=0.4587, over 7169.00 frames. ], tot_loss[loss=3.932, NarTop10Accuracy=0.5282, over 4678.45 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (6/8) Epoch 5, batch 400, train_loss[loss=3.744, NarTop10Accuracy=0.563, over 5049.00 frames. ], tot_loss[loss=3.929, NarTop10Accuracy=0.5284, over 5131.63 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (6/8) Epoch 5, batch 500, train_loss[loss=4.079, NarTop10Accuracy=0.5008, over 6121.00 frames. ], tot_loss[loss=3.925, NarTop10Accuracy=0.5291, over 5408.74 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (6/8) Epoch 5, batch 600, train_loss[loss=4.009, NarTop10Accuracy=0.5086, over 5891.00 frames. ], tot_loss[loss=3.911, NarTop10Accuracy=0.5323, over 5680.09 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,627 INFO [trainer.py:765] (6/8) Epoch 5, batch 700, train_loss[loss=3.903, NarTop10Accuracy=0.5384, over 5013.00 frames. ], tot_loss[loss=3.908, NarTop10Accuracy=0.5328, over 5731.01 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,366 INFO [trainer.py:765] (6/8) Epoch 5, batch 800, train_loss[loss=4.247, NarTop10Accuracy=0.4643, over 5106.00 frames. ], tot_loss[loss=3.916, NarTop10Accuracy=0.5312, over 5786.31 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (6/8) Epoch 5, batch 900, train_loss[loss=4.203, NarTop10Accuracy=0.4717, over 6832.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5325, over 5816.19 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 07:42:13,846 INFO [trainer.py:765] (6/8) Epoch 5, batch 1000, train_loss[loss=4.051, NarTop10Accuracy=0.5027, over 6680.00 frames. ], tot_loss[loss=3.891, NarTop10Accuracy=0.5364, over 5937.48 frames. ], batch size: 14, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (6/8) Epoch 5, batch 1100, train_loss[loss=3.864, NarTop10Accuracy=0.5336, over 6903.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.535, over 5969.53 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,226 INFO [trainer.py:765] (6/8) Epoch 5, batch 1200, train_loss[loss=4.013, NarTop10Accuracy=0.5136, over 7299.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5338, over 5949.04 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,557 INFO [trainer.py:765] (6/8) Epoch 5, batch 1300, train_loss[loss=4.024, NarTop10Accuracy=0.4942, over 4897.00 frames. ], tot_loss[loss=3.906, NarTop10Accuracy=0.5326, over 6011.50 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (6/8) Epoch 5, batch 1400, train_loss[loss=3.894, NarTop10Accuracy=0.5315, over 6358.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5341, over 6025.96 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (6/8) Epoch 5, batch 1500, train_loss[loss=3.979, NarTop10Accuracy=0.5179, over 6227.00 frames. ], tot_loss[loss=3.907, NarTop10Accuracy=0.532, over 5950.68 frames. ], batch size: 49, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (6/8) Epoch 5, batch 1600, train_loss[loss=4.112, NarTop10Accuracy=0.4966, over 7182.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5325, over 5945.35 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,058 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (6/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 07:46:02,122 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (6/8) Epoch 5, batch 1700, train_loss[loss=3.92, NarTop10Accuracy=0.5387, over 6274.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5333, over 5927.64 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (6/8) Epoch 5, batch 1800, train_loss[loss=3.858, NarTop10Accuracy=0.5367, over 7237.00 frames. ], tot_loss[loss=3.896, NarTop10Accuracy=0.5349, over 5991.70 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,489 INFO [trainer.py:765] (6/8) Epoch 5, batch 1900, train_loss[loss=3.75, NarTop10Accuracy=0.5632, over 6108.00 frames. ], tot_loss[loss=3.903, NarTop10Accuracy=0.5336, over 6039.61 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (6/8) Epoch 5, batch 2000, train_loss[loss=3.895, NarTop10Accuracy=0.5372, over 6236.00 frames. ], tot_loss[loss=3.896, NarTop10Accuracy=0.5347, over 6005.98 frames. ], batch size: 49, lr: 1.55e-02 +2024-08-06 07:47:52,618 INFO [trainer.py:765] (6/8) Epoch 5, batch 2100, train_loss[loss=3.884, NarTop10Accuracy=0.5492, over 4923.00 frames. ], tot_loss[loss=3.904, NarTop10Accuracy=0.533, over 5996.19 frames. ], batch size: 5, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (6/8) Epoch 5, batch 2200, train_loss[loss=3.919, NarTop10Accuracy=0.5358, over 7092.00 frames. ], tot_loss[loss=3.889, NarTop10Accuracy=0.5369, over 6034.64 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,421 INFO [trainer.py:765] (6/8) Epoch 5, batch 2300, train_loss[loss=4.136, NarTop10Accuracy=0.4928, over 5832.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5344, over 6053.56 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,169 INFO [trainer.py:765] (6/8) Epoch 5, batch 2400, train_loss[loss=3.464, NarTop10Accuracy=0.6106, over 5817.00 frames. ], tot_loss[loss=3.894, NarTop10Accuracy=0.5358, over 5875.89 frames. ], batch size: 8, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (6/8) Epoch 5, batch 2500, train_loss[loss=3.523, NarTop10Accuracy=0.6129, over 5137.00 frames. ], tot_loss[loss=3.864, NarTop10Accuracy=0.5416, over 5542.31 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,672 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (6/8) Epoch 6, batch 100, train_loss[loss=3.571, NarTop10Accuracy=0.6119, over 7182.00 frames. ], tot_loss[loss=3.782, NarTop10Accuracy=0.5591, over 2389.31 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,788 INFO [trainer.py:765] (6/8) Epoch 6, batch 200, train_loss[loss=3.624, NarTop10Accuracy=0.6065, over 6965.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5577, over 3881.04 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (6/8) Epoch 6, batch 300, train_loss[loss=3.71, NarTop10Accuracy=0.5744, over 7239.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.5588, over 4699.18 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (6/8) Epoch 6, batch 400, train_loss[loss=3.957, NarTop10Accuracy=0.5269, over 5105.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5585, over 5141.18 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (6/8) Epoch 6, batch 500, train_loss[loss=3.985, NarTop10Accuracy=0.5226, over 6145.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5584, over 5403.75 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,285 INFO [trainer.py:765] (6/8) Epoch 6, batch 600, train_loss[loss=3.845, NarTop10Accuracy=0.5492, over 5894.00 frames. ], tot_loss[loss=3.786, NarTop10Accuracy=0.5585, over 5675.19 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,438 INFO [trainer.py:765] (6/8) Epoch 6, batch 700, train_loss[loss=3.441, NarTop10Accuracy=0.6264, over 5100.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5584, over 5749.60 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (6/8) Epoch 6, batch 800, train_loss[loss=3.486, NarTop10Accuracy=0.6156, over 5031.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5562, over 5786.46 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (6/8) Epoch 6, batch 900, train_loss[loss=3.492, NarTop10Accuracy=0.6091, over 6310.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5571, over 5824.61 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (6/8) Epoch 6, batch 1000, train_loss[loss=3.552, NarTop10Accuracy=0.6057, over 6271.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5545, over 5921.77 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:34,170 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (6/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 07:56:45,277 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (6/8) Epoch 6, batch 1100, train_loss[loss=3.71, NarTop10Accuracy=0.5706, over 6781.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.556, over 5956.53 frames. ], batch size: 17, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (6/8) Epoch 6, batch 1200, train_loss[loss=3.899, NarTop10Accuracy=0.5408, over 6930.00 frames. ], tot_loss[loss=3.787, NarTop10Accuracy=0.5576, over 5958.58 frames. ], batch size: 30, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (6/8) Epoch 6, batch 1300, train_loss[loss=3.898, NarTop10Accuracy=0.5452, over 5198.00 frames. ], tot_loss[loss=3.791, NarTop10Accuracy=0.5564, over 6035.92 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (6/8) Epoch 6, batch 1400, train_loss[loss=4.134, NarTop10Accuracy=0.4853, over 6192.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.555, over 6059.40 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,999 INFO [trainer.py:765] (6/8) Epoch 6, batch 1500, train_loss[loss=4.028, NarTop10Accuracy=0.5096, over 6143.00 frames. ], tot_loss[loss=3.798, NarTop10Accuracy=0.555, over 5982.95 frames. 
], batch size: 49, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (6/8) Epoch 6, batch 1600, train_loss[loss=3.609, NarTop10Accuracy=0.5943, over 6991.00 frames. ], tot_loss[loss=3.789, NarTop10Accuracy=0.5573, over 5952.62 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,618 INFO [trainer.py:765] (6/8) Epoch 6, batch 1700, train_loss[loss=3.63, NarTop10Accuracy=0.5827, over 6768.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5583, over 5944.31 frames. ], batch size: 14, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (6/8) Epoch 6, batch 1800, train_loss[loss=3.606, NarTop10Accuracy=0.5852, over 6907.00 frames. ], tot_loss[loss=3.78, NarTop10Accuracy=0.5592, over 6008.04 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,795 INFO [trainer.py:765] (6/8) Epoch 6, batch 1900, train_loss[loss=4.039, NarTop10Accuracy=0.5037, over 6233.00 frames. ], tot_loss[loss=3.813, NarTop10Accuracy=0.5524, over 6048.59 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (6/8) Epoch 6, batch 2000, train_loss[loss=4.11, NarTop10Accuracy=0.4914, over 5909.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5557, over 6019.67 frames. ], batch size: 50, lr: 1.34e-02 +2024-08-06 08:01:43,133 INFO [trainer.py:765] (6/8) Epoch 6, batch 2100, train_loss[loss=3.602, NarTop10Accuracy=0.5841, over 3795.00 frames. ], tot_loss[loss=3.79, NarTop10Accuracy=0.5566, over 5995.99 frames. ], batch size: 4, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (6/8) Epoch 6, batch 2200, train_loss[loss=3.6, NarTop10Accuracy=0.5894, over 7540.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.556, over 6040.92 frames. ], batch size: 32, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (6/8) Epoch 6, batch 2300, train_loss[loss=3.582, NarTop10Accuracy=0.5905, over 5638.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5548, over 6066.28 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (6/8) Epoch 6, batch 2400, train_loss[loss=3.726, NarTop10Accuracy=0.5734, over 5044.00 frames. ], tot_loss[loss=3.793, NarTop10Accuracy=0.5564, over 5868.01 frames. ], batch size: 7, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (6/8) Epoch 6, batch 2500, train_loss[loss=3.834, NarTop10Accuracy=0.5466, over 5080.00 frames. ], tot_loss[loss=3.762, NarTop10Accuracy=0.5619, over 5526.47 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:42,894 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:04:42,818 INFO [trainer.py:765] (6/8) Epoch 7, batch 100, train_loss[loss=3.785, NarTop10Accuracy=0.5584, over 7383.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5802, over 2376.56 frames. ], batch size: 31, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (6/8) Epoch 7, batch 200, train_loss[loss=3.864, NarTop10Accuracy=0.5475, over 6544.00 frames. ], tot_loss[loss=3.709, NarTop10Accuracy=0.5752, over 3865.60 frames. ], batch size: 16, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (6/8) Epoch 7, batch 300, train_loss[loss=3.536, NarTop10Accuracy=0.6204, over 7123.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5725, over 4665.53 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (6/8) Epoch 7, batch 400, train_loss[loss=3.867, NarTop10Accuracy=0.5399, over 5053.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5728, over 5108.42 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,315 INFO [trainer.py:765] (6/8) Epoch 7, batch 500, train_loss[loss=3.668, NarTop10Accuracy=0.5751, over 6327.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.5741, over 5401.10 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (6/8) Epoch 7, batch 600, train_loss[loss=3.564, NarTop10Accuracy=0.6111, over 5843.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5738, over 5684.38 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,333 INFO [trainer.py:765] (6/8) Epoch 7, batch 700, train_loss[loss=3.44, NarTop10Accuracy=0.6154, over 5034.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5727, over 5740.79 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (6/8) Epoch 7, batch 800, train_loss[loss=3.617, NarTop10Accuracy=0.5969, over 5034.00 frames. ], tot_loss[loss=3.706, NarTop10Accuracy=0.5748, over 5793.38 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,739 INFO [trainer.py:765] (6/8) Epoch 7, batch 900, train_loss[loss=3.715, NarTop10Accuracy=0.5674, over 6155.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5739, over 5819.93 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,192 INFO [trainer.py:765] (6/8) Epoch 7, batch 1000, train_loss[loss=3.711, NarTop10Accuracy=0.5744, over 6578.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5733, over 5928.51 frames. ], batch size: 14, lr: 1.20e-02 +2024-08-06 08:10:29,571 INFO [trainer.py:765] (6/8) Epoch 7, batch 1100, train_loss[loss=3.665, NarTop10Accuracy=0.5745, over 6911.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5734, over 5963.17 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (6/8) Epoch 7, batch 1200, train_loss[loss=3.813, NarTop10Accuracy=0.5554, over 7053.00 frames. ], tot_loss[loss=3.711, NarTop10Accuracy=0.573, over 5947.44 frames. ], batch size: 30, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (6/8) Epoch 7, batch 1300, train_loss[loss=3.69, NarTop10Accuracy=0.5789, over 5221.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5712, over 6010.49 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (6/8) Epoch 7, batch 1400, train_loss[loss=3.712, NarTop10Accuracy=0.5757, over 6291.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5709, over 6039.01 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (6/8) Epoch 7, batch 1500, train_loss[loss=3.821, NarTop10Accuracy=0.5519, over 6169.00 frames. ], tot_loss[loss=3.728, NarTop10Accuracy=0.5703, over 5969.90 frames. ], batch size: 49, lr: 1.19e-02 +2024-08-06 08:13:13,238 INFO [trainer.py:765] (6/8) Epoch 7, batch 1600, train_loss[loss=3.634, NarTop10Accuracy=0.584, over 7314.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.5716, over 5957.39 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (6/8) Epoch 7, batch 1700, train_loss[loss=3.664, NarTop10Accuracy=0.5866, over 6171.00 frames. ], tot_loss[loss=3.734, NarTop10Accuracy=0.5691, over 5941.75 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 08:14:06,584 INFO [trainer.py:765] (6/8) Epoch 7, batch 1800, train_loss[loss=3.756, NarTop10Accuracy=0.5732, over 7132.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5686, over 5990.94 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (6/8) Epoch 7, batch 1900, train_loss[loss=4.018, NarTop10Accuracy=0.5125, over 6439.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5687, over 6023.63 frames. ], batch size: 48, lr: 1.17e-02 +2024-08-06 08:14:58,995 INFO [trainer.py:765] (6/8) Epoch 7, batch 2000, train_loss[loss=3.869, NarTop10Accuracy=0.5412, over 6121.00 frames. ], tot_loss[loss=3.729, NarTop10Accuracy=0.5703, over 5999.88 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (6/8) Epoch 7, batch 2100, train_loss[loss=3.724, NarTop10Accuracy=0.5497, over 4765.00 frames. ], tot_loss[loss=3.735, NarTop10Accuracy=0.5686, over 5997.52 frames. ], batch size: 5, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (6/8) Epoch 7, batch 2200, train_loss[loss=3.828, NarTop10Accuracy=0.5415, over 7349.00 frames. ], tot_loss[loss=3.736, NarTop10Accuracy=0.5687, over 6039.75 frames. ], batch size: 31, lr: 1.17e-02 +2024-08-06 08:16:15,490 INFO [trainer.py:765] (6/8) Epoch 7, batch 2300, train_loss[loss=3.883, NarTop10Accuracy=0.5378, over 5900.00 frames. ], tot_loss[loss=3.74, NarTop10Accuracy=0.5682, over 6067.16 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (6/8) Epoch 7, batch 2400, train_loss[loss=3.757, NarTop10Accuracy=0.558, over 5181.00 frames. ], tot_loss[loss=3.741, NarTop10Accuracy=0.5675, over 5868.30 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (6/8) Epoch 7, batch 2500, train_loss[loss=3.721, NarTop10Accuracy=0.5764, over 5099.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5713, over 5522.04 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,844 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (6/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 08:17:17,902 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,216 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:18:36,192 INFO [trainer.py:765] (6/8) Epoch 8, batch 100, train_loss[loss=3.76, NarTop10Accuracy=0.5639, over 7166.00 frames. ], tot_loss[loss=3.662, NarTop10Accuracy=0.5836, over 2382.59 frames. ], batch size: 30, lr: 1.09e-02 +2024-08-06 08:19:15,018 INFO [trainer.py:765] (6/8) Epoch 8, batch 200, train_loss[loss=3.401, NarTop10Accuracy=0.6272, over 6797.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5838, over 3883.91 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,560 INFO [trainer.py:765] (6/8) Epoch 8, batch 300, train_loss[loss=3.583, NarTop10Accuracy=0.5992, over 7543.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5845, over 4690.24 frames. 
], batch size: 23, lr: 1.08e-02 +2024-08-06 08:20:16,267 INFO [trainer.py:765] (6/8) Epoch 8, batch 400, train_loss[loss=3.664, NarTop10Accuracy=0.5805, over 5091.00 frames. ], tot_loss[loss=3.651, NarTop10Accuracy=0.5864, over 5126.84 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,420 INFO [trainer.py:765] (6/8) Epoch 8, batch 500, train_loss[loss=3.655, NarTop10Accuracy=0.5836, over 6198.00 frames. ], tot_loss[loss=3.646, NarTop10Accuracy=0.5878, over 5403.01 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,736 INFO [trainer.py:765] (6/8) Epoch 8, batch 600, train_loss[loss=3.738, NarTop10Accuracy=0.5722, over 6196.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5832, over 5656.86 frames. ], batch size: 10, lr: 1.07e-02 +2024-08-06 08:21:57,606 INFO [trainer.py:765] (6/8) Epoch 8, batch 700, train_loss[loss=3.669, NarTop10Accuracy=0.58, over 5017.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5834, over 5724.46 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,340 INFO [trainer.py:765] (6/8) Epoch 8, batch 800, train_loss[loss=3.608, NarTop10Accuracy=0.6081, over 4999.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5821, over 5790.81 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,891 INFO [trainer.py:765] (6/8) Epoch 8, batch 900, train_loss[loss=3.565, NarTop10Accuracy=0.6062, over 6245.00 frames. ], tot_loss[loss=3.652, NarTop10Accuracy=0.5852, over 5814.49 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:42,942 INFO [trainer.py:765] (6/8) Epoch 8, batch 1000, train_loss[loss=3.427, NarTop10Accuracy=0.6383, over 6719.00 frames. ], tot_loss[loss=3.652, NarTop10Accuracy=0.5857, over 5913.99 frames. ], batch size: 14, lr: 1.06e-02 +2024-08-06 08:24:15,104 INFO [trainer.py:765] (6/8) Epoch 8, batch 1100, train_loss[loss=3.577, NarTop10Accuracy=0.5961, over 6948.00 frames. ], tot_loss[loss=3.663, NarTop10Accuracy=0.5836, over 5959.56 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,338 INFO [trainer.py:765] (6/8) Epoch 8, batch 1200, train_loss[loss=3.629, NarTop10Accuracy=0.5934, over 7196.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5819, over 5950.86 frames. ], batch size: 30, lr: 1.06e-02 +2024-08-06 08:25:26,603 INFO [trainer.py:765] (6/8) Epoch 8, batch 1300, train_loss[loss=3.68, NarTop10Accuracy=0.5925, over 5004.00 frames. ], tot_loss[loss=3.656, NarTop10Accuracy=0.5841, over 6020.02 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,604 INFO [trainer.py:765] (6/8) Epoch 8, batch 1400, train_loss[loss=3.771, NarTop10Accuracy=0.5631, over 6227.00 frames. ], tot_loss[loss=3.676, NarTop10Accuracy=0.5803, over 6037.66 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,985 INFO [trainer.py:765] (6/8) Epoch 8, batch 1500, train_loss[loss=3.76, NarTop10Accuracy=0.57, over 6396.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5836, over 5974.34 frames. ], batch size: 49, lr: 1.05e-02 +2024-08-06 08:26:56,931 INFO [trainer.py:765] (6/8) Epoch 8, batch 1600, train_loss[loss=3.717, NarTop10Accuracy=0.5644, over 7203.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5835, over 5961.62 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,762 INFO [trainer.py:765] (6/8) Epoch 8, batch 1700, train_loss[loss=3.668, NarTop10Accuracy=0.5901, over 6246.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5813, over 5942.51 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,460 INFO [trainer.py:765] (6/8) Epoch 8, batch 1800, train_loss[loss=3.708, NarTop10Accuracy=0.5827, over 7134.00 frames. ], tot_loss[loss=3.672, NarTop10Accuracy=0.5815, over 6005.55 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,178 INFO [trainer.py:765] (6/8) Epoch 8, batch 1900, train_loss[loss=4.097, NarTop10Accuracy=0.4885, over 6084.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5822, over 6039.26 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 08:28:25,162 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (6/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 08:28:35,795 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,982 INFO [trainer.py:765] (6/8) Epoch 8, batch 2000, train_loss[loss=3.827, NarTop10Accuracy=0.5577, over 6498.00 frames. ], tot_loss[loss=3.665, NarTop10Accuracy=0.5833, over 6011.63 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:29:18,484 INFO [trainer.py:765] (6/8) Epoch 8, batch 2100, train_loss[loss=3.408, NarTop10Accuracy=0.6471, over 3960.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5822, over 5989.90 frames. ], batch size: 4, lr: 1.04e-02 +2024-08-06 08:29:43,789 INFO [trainer.py:765] (6/8) Epoch 8, batch 2200, train_loss[loss=3.975, NarTop10Accuracy=0.5168, over 7341.00 frames. ], tot_loss[loss=3.678, NarTop10Accuracy=0.5807, over 6044.52 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,133 INFO [trainer.py:765] (6/8) Epoch 8, batch 2300, train_loss[loss=3.555, NarTop10Accuracy=0.6036, over 5854.00 frames. ], tot_loss[loss=3.683, NarTop10Accuracy=0.58, over 6079.41 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,790 INFO [trainer.py:765] (6/8) Epoch 8, batch 2400, train_loss[loss=3.529, NarTop10Accuracy=0.6071, over 5064.00 frames. ], tot_loss[loss=3.693, NarTop10Accuracy=0.5778, over 5887.37 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (6/8) Epoch 8, batch 2500, train_loss[loss=3.452, NarTop10Accuracy=0.6321, over 4994.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.5835, over 5551.31 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,124 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (6/8) Epoch 9, batch 100, train_loss[loss=3.952, NarTop10Accuracy=0.5249, over 7028.00 frames. ], tot_loss[loss=3.607, NarTop10Accuracy=0.5961, over 2366.24 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,460 INFO [trainer.py:765] (6/8) Epoch 9, batch 200, train_loss[loss=3.567, NarTop10Accuracy=0.6054, over 6924.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5994, over 3878.16 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (6/8) Epoch 9, batch 300, train_loss[loss=3.807, NarTop10Accuracy=0.555, over 7345.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.599, over 4684.61 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,963 INFO [trainer.py:765] (6/8) Epoch 9, batch 400, train_loss[loss=3.476, NarTop10Accuracy=0.6265, over 5129.00 frames. ], tot_loss[loss=3.586, NarTop10Accuracy=0.6002, over 5117.74 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,880 INFO [trainer.py:765] (6/8) Epoch 9, batch 500, train_loss[loss=3.731, NarTop10Accuracy=0.576, over 6164.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.6027, over 5387.83 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,498 INFO [trainer.py:765] (6/8) Epoch 9, batch 600, train_loss[loss=3.535, NarTop10Accuracy=0.6084, over 5789.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5996, over 5659.96 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (6/8) Epoch 9, batch 700, train_loss[loss=3.611, NarTop10Accuracy=0.5752, over 5114.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.5984, over 5726.98 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (6/8) Epoch 9, batch 800, train_loss[loss=3.384, NarTop10Accuracy=0.6392, over 4980.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5963, over 5764.14 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,454 INFO [trainer.py:765] (6/8) Epoch 9, batch 900, train_loss[loss=3.552, NarTop10Accuracy=0.6105, over 6305.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.5948, over 5807.50 frames. ], batch size: 13, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (6/8) Epoch 9, batch 1000, train_loss[loss=3.546, NarTop10Accuracy=0.6032, over 6326.00 frames. ], tot_loss[loss=3.618, NarTop10Accuracy=0.5928, over 5919.62 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,420 INFO [trainer.py:765] (6/8) Epoch 9, batch 1100, train_loss[loss=3.841, NarTop10Accuracy=0.5419, over 6857.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5904, over 5952.62 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (6/8) Epoch 9, batch 1200, train_loss[loss=3.675, NarTop10Accuracy=0.582, over 7007.00 frames. ], tot_loss[loss=3.636, NarTop10Accuracy=0.5894, over 5948.88 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (6/8) Epoch 9, batch 1300, train_loss[loss=3.465, NarTop10Accuracy=0.6236, over 5072.00 frames. ], tot_loss[loss=3.636, NarTop10Accuracy=0.5889, over 6012.64 frames. ], batch size: 6, lr: 9.46e-03 +2024-08-06 08:39:27,116 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (6/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (6/8) Epoch 9, batch 1400, train_loss[loss=3.445, NarTop10Accuracy=0.6239, over 6234.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5904, over 6028.66 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (6/8) Epoch 9, batch 1500, train_loss[loss=3.934, NarTop10Accuracy=0.5343, over 6477.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5902, over 5970.55 frames. ], batch size: 49, lr: 9.41e-03 +2024-08-06 08:40:50,368 INFO [trainer.py:765] (6/8) Epoch 9, batch 1600, train_loss[loss=3.628, NarTop10Accuracy=0.5928, over 7297.00 frames. ], tot_loss[loss=3.627, NarTop10Accuracy=0.5906, over 5961.44 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,153 INFO [trainer.py:765] (6/8) Epoch 9, batch 1700, train_loss[loss=3.544, NarTop10Accuracy=0.6123, over 6319.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.5885, over 5946.02 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (6/8) Epoch 9, batch 1800, train_loss[loss=3.679, NarTop10Accuracy=0.5765, over 7342.00 frames. ], tot_loss[loss=3.626, NarTop10Accuracy=0.5909, over 6010.06 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,496 INFO [trainer.py:765] (6/8) Epoch 9, batch 1900, train_loss[loss=3.713, NarTop10Accuracy=0.5769, over 5937.00 frames. ], tot_loss[loss=3.634, NarTop10Accuracy=0.5892, over 6048.63 frames. ], batch size: 52, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (6/8) Epoch 9, batch 2000, train_loss[loss=3.858, NarTop10Accuracy=0.5446, over 5686.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5883, over 6013.75 frames. ], batch size: 49, lr: 9.31e-03 +2024-08-06 08:43:01,668 INFO [trainer.py:765] (6/8) Epoch 9, batch 2100, train_loss[loss=3.106, NarTop10Accuracy=0.6738, over 4030.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.59, over 5986.21 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,179 INFO [trainer.py:765] (6/8) Epoch 9, batch 2200, train_loss[loss=3.635, NarTop10Accuracy=0.592, over 7236.00 frames. ], tot_loss[loss=3.642, NarTop10Accuracy=0.588, over 6032.21 frames. ], batch size: 30, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (6/8) Epoch 9, batch 2300, train_loss[loss=3.65, NarTop10Accuracy=0.5899, over 5810.00 frames. ], tot_loss[loss=3.659, NarTop10Accuracy=0.5847, over 6058.88 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (6/8) Epoch 9, batch 2400, train_loss[loss=3.417, NarTop10Accuracy=0.631, over 5153.00 frames. ], tot_loss[loss=3.654, NarTop10Accuracy=0.5854, over 5874.88 frames. ], batch size: 7, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (6/8) Epoch 9, batch 2500, train_loss[loss=3.79, NarTop10Accuracy=0.544, over 5160.00 frames. ], tot_loss[loss=3.638, NarTop10Accuracy=0.5883, over 5536.75 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:05,081 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 08:46:09,064 INFO [trainer.py:765] (6/8) Epoch 10, batch 100, train_loss[loss=3.502, NarTop10Accuracy=0.6257, over 7096.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.6002, over 2376.52 frames. ], batch size: 30, lr: 8.75e-03 +2024-08-06 08:46:44,074 INFO [trainer.py:765] (6/8) Epoch 10, batch 200, train_loss[loss=3.546, NarTop10Accuracy=0.6049, over 6938.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6047, over 3871.91 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,444 INFO [trainer.py:765] (6/8) Epoch 10, batch 300, train_loss[loss=3.633, NarTop10Accuracy=0.5943, over 7138.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6047, over 4682.73 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,119 INFO [trainer.py:765] (6/8) Epoch 10, batch 400, train_loss[loss=3.777, NarTop10Accuracy=0.5687, over 5118.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6048, over 5129.73 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,371 INFO [trainer.py:765] (6/8) Epoch 10, batch 500, train_loss[loss=3.454, NarTop10Accuracy=0.6236, over 6038.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6056, over 5409.42 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,460 INFO [trainer.py:765] (6/8) Epoch 10, batch 600, train_loss[loss=3.375, NarTop10Accuracy=0.6424, over 5693.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6018, over 5674.17 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,707 INFO [trainer.py:765] (6/8) Epoch 10, batch 700, train_loss[loss=3.145, NarTop10Accuracy=0.6795, over 5079.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5995, over 5720.82 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,165 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (6/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 08:50:01,725 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,803 INFO [trainer.py:765] (6/8) Epoch 10, batch 800, train_loss[loss=3.361, NarTop10Accuracy=0.6431, over 4932.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.5997, over 5781.59 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,890 INFO [trainer.py:765] (6/8) Epoch 10, batch 900, train_loss[loss=3.43, NarTop10Accuracy=0.6271, over 6107.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.601, over 5810.09 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (6/8) Epoch 10, batch 1000, train_loss[loss=3.711, NarTop10Accuracy=0.564, over 6234.00 frames. ], tot_loss[loss=3.593, NarTop10Accuracy=0.5979, over 5907.94 frames. ], batch size: 13, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (6/8) Epoch 10, batch 1100, train_loss[loss=3.353, NarTop10Accuracy=0.6517, over 6842.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5968, over 5953.95 frames. ], batch size: 17, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (6/8) Epoch 10, batch 1200, train_loss[loss=3.503, NarTop10Accuracy=0.6164, over 7546.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5968, over 5962.20 frames. ], batch size: 31, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (6/8) Epoch 10, batch 1300, train_loss[loss=3.817, NarTop10Accuracy=0.5539, over 5015.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5977, over 6026.99 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,880 INFO [trainer.py:765] (6/8) Epoch 10, batch 1400, train_loss[loss=3.451, NarTop10Accuracy=0.6198, over 6171.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5954, over 6050.96 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (6/8) Epoch 10, batch 1500, train_loss[loss=3.669, NarTop10Accuracy=0.5811, over 5757.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.5968, over 5970.85 frames. ], batch size: 49, lr: 8.51e-03 +2024-08-06 08:54:45,525 INFO [trainer.py:765] (6/8) Epoch 10, batch 1600, train_loss[loss=3.56, NarTop10Accuracy=0.6046, over 7143.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5963, over 5952.03 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,299 INFO [trainer.py:765] (6/8) Epoch 10, batch 1700, train_loss[loss=3.472, NarTop10Accuracy=0.6332, over 6684.00 frames. ], tot_loss[loss=3.608, NarTop10Accuracy=0.5949, over 5942.74 frames. 
], batch size: 14, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (6/8) Epoch 10, batch 1800, train_loss[loss=3.317, NarTop10Accuracy=0.6452, over 7177.00 frames. ], tot_loss[loss=3.604, NarTop10Accuracy=0.5957, over 6000.82 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,571 INFO [trainer.py:765] (6/8) Epoch 10, batch 1900, train_loss[loss=3.982, NarTop10Accuracy=0.5187, over 5604.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5957, over 6045.16 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (6/8) Epoch 10, batch 2000, train_loss[loss=3.72, NarTop10Accuracy=0.584, over 6112.00 frames. ], tot_loss[loss=3.609, NarTop10Accuracy=0.5948, over 6027.38 frames. ], batch size: 48, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (6/8) Epoch 10, batch 2100, train_loss[loss=3.469, NarTop10Accuracy=0.62, over 4003.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5941, over 5996.50 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (6/8) Epoch 10, batch 2200, train_loss[loss=3.665, NarTop10Accuracy=0.5827, over 7305.00 frames. ], tot_loss[loss=3.606, NarTop10Accuracy=0.5952, over 6029.20 frames. ], batch size: 31, lr: 8.40e-03 +2024-08-06 08:57:50,682 INFO [trainer.py:765] (6/8) Epoch 10, batch 2300, train_loss[loss=3.518, NarTop10Accuracy=0.6154, over 5826.00 frames. ], tot_loss[loss=3.618, NarTop10Accuracy=0.5932, over 6061.35 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,344 INFO [trainer.py:765] (6/8) Epoch 10, batch 2400, train_loss[loss=3.299, NarTop10Accuracy=0.6507, over 5077.00 frames. ], tot_loss[loss=3.625, NarTop10Accuracy=0.5918, over 5874.16 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,808 INFO [trainer.py:765] (6/8) Epoch 10, batch 2500, train_loss[loss=3.309, NarTop10Accuracy=0.6595, over 5086.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5961, over 5540.73 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:58:59,941 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:00:03,681 INFO [trainer.py:765] (6/8) Epoch 11, batch 100, train_loss[loss=3.448, NarTop10Accuracy=0.633, over 7147.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6119, over 2365.69 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,217 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (6/8) Epoch 11, batch 200, train_loss[loss=3.861, NarTop10Accuracy=0.5351, over 6746.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6113, over 3864.94 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (6/8) Epoch 11, batch 300, train_loss[loss=3.271, NarTop10Accuracy=0.6684, over 7227.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6098, over 4665.10 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,534 INFO [trainer.py:765] (6/8) Epoch 11, batch 400, train_loss[loss=3.282, NarTop10Accuracy=0.6663, over 5150.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6098, over 5122.75 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,238 INFO [trainer.py:765] (6/8) Epoch 11, batch 500, train_loss[loss=3.254, NarTop10Accuracy=0.6534, over 6112.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6105, over 5395.46 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,742 INFO [trainer.py:765] (6/8) Epoch 11, batch 600, train_loss[loss=3.566, NarTop10Accuracy=0.5968, over 5728.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6094, over 5673.86 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,236 INFO [trainer.py:765] (6/8) Epoch 11, batch 700, train_loss[loss=3.357, NarTop10Accuracy=0.6546, over 4226.00 frames. ], tot_loss[loss=3.546, NarTop10Accuracy=0.6079, over 5739.88 frames. ], batch size: 5, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (6/8) Epoch 11, batch 800, train_loss[loss=3.239, NarTop10Accuracy=0.6733, over 5082.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6026, over 5785.68 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,083 INFO [trainer.py:765] (6/8) Epoch 11, batch 900, train_loss[loss=3.508, NarTop10Accuracy=0.6094, over 6225.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.6046, over 5808.51 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 09:05:27,016 INFO [trainer.py:765] (6/8) Epoch 11, batch 1000, train_loss[loss=3.409, NarTop10Accuracy=0.6345, over 6697.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6049, over 5909.40 frames. ], batch size: 14, lr: 7.83e-03 +2024-08-06 09:06:00,351 INFO [trainer.py:765] (6/8) Epoch 11, batch 1100, train_loss[loss=3.557, NarTop10Accuracy=0.6092, over 6704.00 frames. ], tot_loss[loss=3.573, NarTop10Accuracy=0.6026, over 5942.68 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,946 INFO [trainer.py:765] (6/8) Epoch 11, batch 1200, train_loss[loss=3.619, NarTop10Accuracy=0.5919, over 7095.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6024, over 5939.55 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,494 INFO [trainer.py:765] (6/8) Epoch 11, batch 1300, train_loss[loss=3.448, NarTop10Accuracy=0.6382, over 5164.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.6025, over 6004.87 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,628 INFO [trainer.py:765] (6/8) Epoch 11, batch 1400, train_loss[loss=3.542, NarTop10Accuracy=0.6107, over 6174.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6013, over 6012.20 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,987 INFO [trainer.py:765] (6/8) Epoch 11, batch 1500, train_loss[loss=3.629, NarTop10Accuracy=0.5946, over 5762.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6006, over 5960.61 frames. ], batch size: 49, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (6/8) Epoch 11, batch 1600, train_loss[loss=3.526, NarTop10Accuracy=0.6164, over 7180.00 frames. ], tot_loss[loss=3.578, NarTop10Accuracy=0.6013, over 5959.23 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (6/8) Epoch 11, batch 1700, train_loss[loss=3.331, NarTop10Accuracy=0.6566, over 6691.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6026, over 5950.39 frames. ], batch size: 14, lr: 7.73e-03 +2024-08-06 09:09:40,734 INFO [trainer.py:765] (6/8) Epoch 11, batch 1800, train_loss[loss=3.529, NarTop10Accuracy=0.6119, over 7173.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6014, over 6012.89 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,342 INFO [trainer.py:765] (6/8) Epoch 11, batch 1900, train_loss[loss=3.608, NarTop10Accuracy=0.5924, over 6400.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5976, over 6053.24 frames. ], batch size: 51, lr: 7.70e-03 +2024-08-06 09:10:33,039 INFO [trainer.py:765] (6/8) Epoch 11, batch 2000, train_loss[loss=3.572, NarTop10Accuracy=0.6045, over 6424.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.5996, over 6037.29 frames. ], batch size: 49, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (6/8) Epoch 11, batch 2100, train_loss[loss=3.316, NarTop10Accuracy=0.6623, over 4948.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6027, over 5999.61 frames. ], batch size: 5, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (6/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (6/8) Epoch 11, batch 2200, train_loss[loss=3.534, NarTop10Accuracy=0.6092, over 7146.00 frames. ], tot_loss[loss=3.577, NarTop10Accuracy=0.6014, over 6036.28 frames. ], batch size: 30, lr: 7.66e-03 +2024-08-06 09:11:59,939 INFO [trainer.py:765] (6/8) Epoch 11, batch 2300, train_loss[loss=3.541, NarTop10Accuracy=0.6142, over 5853.00 frames. ], tot_loss[loss=3.592, NarTop10Accuracy=0.5986, over 6069.19 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,696 INFO [trainer.py:765] (6/8) Epoch 11, batch 2400, train_loss[loss=3.826, NarTop10Accuracy=0.5558, over 5177.00 frames. ], tot_loss[loss=3.602, NarTop10Accuracy=0.5964, over 5893.06 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (6/8) Epoch 11, batch 2500, train_loss[loss=3.793, NarTop10Accuracy=0.5529, over 4998.00 frames. ], tot_loss[loss=3.572, NarTop10Accuracy=0.6019, over 5543.15 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,151 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:14:12,279 INFO [trainer.py:765] (6/8) Epoch 12, batch 100, train_loss[loss=3.476, NarTop10Accuracy=0.6184, over 7451.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6124, over 2380.79 frames. ], batch size: 31, lr: 7.29e-03 +2024-08-06 09:14:48,096 INFO [trainer.py:765] (6/8) Epoch 12, batch 200, train_loss[loss=3.302, NarTop10Accuracy=0.6566, over 6945.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6153, over 3899.68 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (6/8) Epoch 12, batch 300, train_loss[loss=3.383, NarTop10Accuracy=0.6454, over 7282.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6173, over 4692.88 frames. ], batch size: 23, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (6/8) Epoch 12, batch 400, train_loss[loss=3.583, NarTop10Accuracy=0.6044, over 5220.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6146, over 5139.48 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (6/8) Epoch 12, batch 500, train_loss[loss=3.44, NarTop10Accuracy=0.6216, over 6161.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.614, over 5429.48 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,239 INFO [trainer.py:765] (6/8) Epoch 12, batch 600, train_loss[loss=3.488, NarTop10Accuracy=0.6227, over 5837.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6108, over 5700.26 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,318 INFO [trainer.py:765] (6/8) Epoch 12, batch 700, train_loss[loss=3.397, NarTop10Accuracy=0.6333, over 5094.00 frames. ], tot_loss[loss=3.524, NarTop10Accuracy=0.6127, over 5754.45 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,753 INFO [trainer.py:765] (6/8) Epoch 12, batch 800, train_loss[loss=3.562, NarTop10Accuracy=0.6122, over 5233.00 frames. ], tot_loss[loss=3.527, NarTop10Accuracy=0.6114, over 5821.66 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,779 INFO [trainer.py:765] (6/8) Epoch 12, batch 900, train_loss[loss=3.764, NarTop10Accuracy=0.5659, over 6314.00 frames. ], tot_loss[loss=3.539, NarTop10Accuracy=0.6092, over 5843.47 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (6/8) Epoch 12, batch 1000, train_loss[loss=3.719, NarTop10Accuracy=0.5694, over 6779.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.609, over 5938.95 frames. ], batch size: 14, lr: 7.18e-03 +2024-08-06 09:19:52,427 INFO [trainer.py:765] (6/8) Epoch 12, batch 1100, train_loss[loss=3.769, NarTop10Accuracy=0.5674, over 6987.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6092, over 5952.03 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,443 INFO [trainer.py:765] (6/8) Epoch 12, batch 1200, train_loss[loss=3.372, NarTop10Accuracy=0.6421, over 6948.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6086, over 5941.20 frames. ], batch size: 30, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (6/8) Epoch 12, batch 1300, train_loss[loss=3.658, NarTop10Accuracy=0.5919, over 4926.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6053, over 6001.77 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (6/8) Epoch 12, batch 1400, train_loss[loss=3.313, NarTop10Accuracy=0.6635, over 6049.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6045, over 6020.11 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,920 INFO [trainer.py:765] (6/8) Epoch 12, batch 1500, train_loss[loss=3.495, NarTop10Accuracy=0.6193, over 6383.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6058, over 5978.89 frames. ], batch size: 48, lr: 7.12e-03 +2024-08-06 09:22:38,027 INFO [trainer.py:765] (6/8) Epoch 12, batch 1600, train_loss[loss=3.625, NarTop10Accuracy=0.5946, over 7284.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6045, over 5961.34 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,859 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (6/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 09:22:50,414 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,786 INFO [trainer.py:765] (6/8) Epoch 12, batch 1700, train_loss[loss=3.465, NarTop10Accuracy=0.6354, over 6321.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6053, over 5939.18 frames. 
], batch size: 13, lr: 7.10e-03 +2024-08-06 09:23:41,387 INFO [trainer.py:765] (6/8) Epoch 12, batch 1800, train_loss[loss=3.196, NarTop10Accuracy=0.6779, over 7159.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6071, over 6015.35 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (6/8) Epoch 12, batch 1900, train_loss[loss=3.619, NarTop10Accuracy=0.5971, over 6476.00 frames. ], tot_loss[loss=3.566, NarTop10Accuracy=0.6039, over 6055.07 frames. ], batch size: 49, lr: 7.08e-03 +2024-08-06 09:24:33,619 INFO [trainer.py:765] (6/8) Epoch 12, batch 2000, train_loss[loss=3.511, NarTop10Accuracy=0.6189, over 5470.00 frames. ], tot_loss[loss=3.562, NarTop10Accuracy=0.6042, over 6022.02 frames. ], batch size: 50, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (6/8) Epoch 12, batch 2100, train_loss[loss=3.811, NarTop10Accuracy=0.5593, over 4810.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6069, over 6004.92 frames. ], batch size: 5, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (6/8) Epoch 12, batch 2200, train_loss[loss=3.531, NarTop10Accuracy=0.6131, over 7196.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6059, over 6047.17 frames. ], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,927 INFO [trainer.py:765] (6/8) Epoch 12, batch 2300, train_loss[loss=3.696, NarTop10Accuracy=0.5699, over 5859.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6032, over 6067.26 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (6/8) Epoch 12, batch 2400, train_loss[loss=3.379, NarTop10Accuracy=0.6379, over 5115.00 frames. ], tot_loss[loss=3.579, NarTop10Accuracy=0.6013, over 5870.90 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,155 INFO [trainer.py:765] (6/8) Epoch 12, batch 2500, train_loss[loss=3.496, NarTop10Accuracy=0.6203, over 4987.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6061, over 5539.03 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,399 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (6/8) Epoch 13, batch 100, train_loss[loss=3.615, NarTop10Accuracy=0.5868, over 7222.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.612, over 2383.60 frames. ], batch size: 31, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (6/8) Epoch 13, batch 200, train_loss[loss=3.228, NarTop10Accuracy=0.6707, over 6807.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6179, over 3880.96 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (6/8) Epoch 13, batch 300, train_loss[loss=3.231, NarTop10Accuracy=0.6615, over 7178.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6185, over 4684.43 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,039 INFO [trainer.py:765] (6/8) Epoch 13, batch 400, train_loss[loss=3.293, NarTop10Accuracy=0.6559, over 5828.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6204, over 5122.84 frames. ], batch size: 8, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (6/8) Epoch 13, batch 500, train_loss[loss=3.73, NarTop10Accuracy=0.5606, over 6207.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6187, over 5400.81 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (6/8) Epoch 13, batch 600, train_loss[loss=3.552, NarTop10Accuracy=0.6018, over 5733.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6188, over 5682.63 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (6/8) Epoch 13, batch 700, train_loss[loss=3.535, NarTop10Accuracy=0.6083, over 5088.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6168, over 5746.49 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (6/8) Epoch 13, batch 800, train_loss[loss=3.579, NarTop10Accuracy=0.6008, over 5173.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.616, over 5803.18 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (6/8) Epoch 13, batch 900, train_loss[loss=3.383, NarTop10Accuracy=0.6412, over 6706.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6152, over 5823.84 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (6/8) Epoch 13, batch 1000, train_loss[loss=3.734, NarTop10Accuracy=0.5691, over 6684.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.6143, over 5931.77 frames. ], batch size: 14, lr: 6.63e-03 +2024-08-06 09:33:14,219 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (6/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,526 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (6/8) Epoch 13, batch 1100, train_loss[loss=3.623, NarTop10Accuracy=0.593, over 6912.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6108, over 5962.34 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,485 INFO [trainer.py:765] (6/8) Epoch 13, batch 1200, train_loss[loss=3.525, NarTop10Accuracy=0.6026, over 6988.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.6111, over 5949.52 frames. ], batch size: 30, lr: 6.61e-03 +2024-08-06 09:35:05,085 INFO [trainer.py:765] (6/8) Epoch 13, batch 1300, train_loss[loss=3.678, NarTop10Accuracy=0.5788, over 5047.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6132, over 6024.03 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,405 INFO [trainer.py:765] (6/8) Epoch 13, batch 1400, train_loss[loss=3.592, NarTop10Accuracy=0.5984, over 6180.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.61, over 6053.79 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (6/8) Epoch 13, batch 1500, train_loss[loss=3.844, NarTop10Accuracy=0.5479, over 5910.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6079, over 5986.59 frames. ], batch size: 49, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (6/8) Epoch 13, batch 1600, train_loss[loss=3.794, NarTop10Accuracy=0.5547, over 7165.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6076, over 5971.72 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,143 INFO [trainer.py:765] (6/8) Epoch 13, batch 1700, train_loss[loss=3.599, NarTop10Accuracy=0.5866, over 6707.00 frames. ], tot_loss[loss=3.55, NarTop10Accuracy=0.6065, over 5930.70 frames. ], batch size: 14, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (6/8) Epoch 13, batch 1800, train_loss[loss=3.342, NarTop10Accuracy=0.6545, over 7374.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6073, over 5995.05 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,386 INFO [trainer.py:765] (6/8) Epoch 13, batch 1900, train_loss[loss=3.622, NarTop10Accuracy=0.5966, over 6172.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6075, over 6039.15 frames. ], batch size: 50, lr: 6.54e-03 +2024-08-06 09:38:21,123 INFO [trainer.py:765] (6/8) Epoch 13, batch 2000, train_loss[loss=3.661, NarTop10Accuracy=0.5796, over 6374.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6092, over 6020.40 frames. ], batch size: 49, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (6/8) Epoch 13, batch 2100, train_loss[loss=3.223, NarTop10Accuracy=0.6697, over 3982.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6091, over 5990.68 frames. ], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (6/8) Epoch 13, batch 2200, train_loss[loss=3.554, NarTop10Accuracy=0.6045, over 7274.00 frames. ], tot_loss[loss=3.54, NarTop10Accuracy=0.6085, over 6042.24 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (6/8) Epoch 13, batch 2300, train_loss[loss=3.613, NarTop10Accuracy=0.6025, over 5880.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6072, over 6069.81 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (6/8) Epoch 13, batch 2400, train_loss[loss=3.45, NarTop10Accuracy=0.6218, over 5171.00 frames. ], tot_loss[loss=3.556, NarTop10Accuracy=0.6056, over 5874.03 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,768 INFO [trainer.py:765] (6/8) Epoch 13, batch 2500, train_loss[loss=3.221, NarTop10Accuracy=0.6837, over 4960.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6112, over 5527.13 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:50,371 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:41:48,980 INFO [trainer.py:765] (6/8) Epoch 14, batch 100, train_loss[loss=3.236, NarTop10Accuracy=0.6722, over 7148.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6206, over 2365.71 frames. ], batch size: 30, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (6/8) Epoch 14, batch 200, train_loss[loss=3.53, NarTop10Accuracy=0.6161, over 6997.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6258, over 3859.57 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,414 INFO [trainer.py:765] (6/8) Epoch 14, batch 300, train_loss[loss=3.748, NarTop10Accuracy=0.5632, over 7288.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6222, over 4660.45 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,439 INFO [trainer.py:765] (6/8) Epoch 14, batch 400, train_loss[loss=3.269, NarTop10Accuracy=0.6688, over 4960.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6218, over 5109.52 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,486 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (6/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,651 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27460MB +2024-08-06 09:43:54,212 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (6/8) Epoch 14, batch 500, train_loss[loss=3.524, NarTop10Accuracy=0.6076, over 6157.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.624, over 5378.27 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (6/8) Epoch 14, batch 600, train_loss[loss=3.548, NarTop10Accuracy=0.5983, over 5801.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6229, over 5652.86 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,804 INFO [trainer.py:765] (6/8) Epoch 14, batch 700, train_loss[loss=4.02, NarTop10Accuracy=0.5169, over 5049.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6222, over 5724.42 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (6/8) Epoch 14, batch 800, train_loss[loss=3.2, NarTop10Accuracy=0.6703, over 5075.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6203, over 5777.85 frames. ], batch size: 6, lr: 6.17e-03 +2024-08-06 09:46:35,419 INFO [trainer.py:765] (6/8) Epoch 14, batch 900, train_loss[loss=3.711, NarTop10Accuracy=0.5773, over 6294.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6209, over 5811.33 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (6/8) Epoch 14, batch 1000, train_loss[loss=3.581, NarTop10Accuracy=0.5982, over 6191.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6203, over 5929.01 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (6/8) Epoch 14, batch 1100, train_loss[loss=3.292, NarTop10Accuracy=0.6611, over 7010.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.619, over 5959.60 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (6/8) Epoch 14, batch 1200, train_loss[loss=3.386, NarTop10Accuracy=0.641, over 7073.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6186, over 5968.89 frames. ], batch size: 30, lr: 6.14e-03 +2024-08-06 09:48:57,971 INFO [trainer.py:765] (6/8) Epoch 14, batch 1300, train_loss[loss=3.261, NarTop10Accuracy=0.6442, over 5099.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 6040.95 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (6/8) Epoch 14, batch 1400, train_loss[loss=3.171, NarTop10Accuracy=0.6721, over 6213.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6133, over 6057.97 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (6/8) Epoch 14, batch 1500, train_loss[loss=3.566, NarTop10Accuracy=0.6095, over 6001.00 frames. ], tot_loss[loss=3.521, NarTop10Accuracy=0.6128, over 5994.03 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (6/8) Epoch 14, batch 1600, train_loss[loss=3.387, NarTop10Accuracy=0.6411, over 6989.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6143, over 5974.02 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,379 INFO [trainer.py:765] (6/8) Epoch 14, batch 1700, train_loss[loss=3.446, NarTop10Accuracy=0.6458, over 6250.00 frames. ], tot_loss[loss=3.509, NarTop10Accuracy=0.6151, over 5944.77 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (6/8) Epoch 14, batch 1800, train_loss[loss=3.635, NarTop10Accuracy=0.5943, over 6959.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6156, over 6007.15 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (6/8) Epoch 14, batch 1900, train_loss[loss=3.815, NarTop10Accuracy=0.5639, over 5899.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6137, over 6047.47 frames. 
], batch size: 48, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (6/8) Epoch 14, batch 2000, train_loss[loss=3.743, NarTop10Accuracy=0.5669, over 6230.00 frames. ], tot_loss[loss=3.526, NarTop10Accuracy=0.6119, over 6007.14 frames. ], batch size: 49, lr: 6.07e-03 +2024-08-06 09:52:47,011 INFO [trainer.py:765] (6/8) Epoch 14, batch 2100, train_loss[loss=3.303, NarTop10Accuracy=0.6526, over 3934.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6149, over 5992.00 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,481 INFO [trainer.py:765] (6/8) Epoch 14, batch 2200, train_loss[loss=3.472, NarTop10Accuracy=0.6407, over 7181.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6162, over 6024.10 frames. ], batch size: 30, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (6/8) Epoch 14, batch 2300, train_loss[loss=3.776, NarTop10Accuracy=0.5746, over 5979.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6117, over 6058.09 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (6/8) Epoch 14, batch 2400, train_loss[loss=3.49, NarTop10Accuracy=0.6093, over 5793.00 frames. ], tot_loss[loss=3.543, NarTop10Accuracy=0.6088, over 5876.39 frames. ], batch size: 8, lr: 6.04e-03 +2024-08-06 09:54:12,819 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (6/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 27658MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (6/8) Epoch 14, batch 2500, train_loss[loss=3.713, NarTop10Accuracy=0.5745, over 5023.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6139, over 5552.12 frames. ], batch size: 6, lr: 6.03e-03 +2024-08-06 09:54:58,789 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 09:56:03,097 INFO [trainer.py:765] (6/8) Epoch 15, batch 100, train_loss[loss=3.538, NarTop10Accuracy=0.6085, over 7061.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6269, over 2369.08 frames. ], batch size: 30, lr: 5.81e-03 +2024-08-06 09:56:35,980 INFO [trainer.py:765] (6/8) Epoch 15, batch 200, train_loss[loss=3.373, NarTop10Accuracy=0.648, over 6828.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6274, over 3865.77 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,654 INFO [trainer.py:765] (6/8) Epoch 15, batch 300, train_loss[loss=3.377, NarTop10Accuracy=0.6552, over 7268.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6278, over 4662.93 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,464 INFO [trainer.py:765] (6/8) Epoch 15, batch 400, train_loss[loss=3.614, NarTop10Accuracy=0.5979, over 5233.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6258, over 5106.43 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,235 INFO [trainer.py:765] (6/8) Epoch 15, batch 500, train_loss[loss=3.341, NarTop10Accuracy=0.648, over 6166.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6244, over 5376.01 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,543 INFO [trainer.py:765] (6/8) Epoch 15, batch 600, train_loss[loss=3.537, NarTop10Accuracy=0.5992, over 5690.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6244, over 5643.55 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,063 INFO [trainer.py:765] (6/8) Epoch 15, batch 700, train_loss[loss=3.444, NarTop10Accuracy=0.6314, over 5083.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6223, over 5732.21 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (6/8) Epoch 15, batch 800, train_loss[loss=3.597, NarTop10Accuracy=0.5868, over 4929.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6216, over 5772.89 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (6/8) Epoch 15, batch 900, train_loss[loss=3.407, NarTop10Accuracy=0.6407, over 6447.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6233, over 5783.46 frames. ], batch size: 14, lr: 5.75e-03 +2024-08-06 10:01:05,539 INFO [trainer.py:765] (6/8) Epoch 15, batch 1000, train_loss[loss=3.255, NarTop10Accuracy=0.6541, over 6215.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6225, over 5897.78 frames. ], batch size: 13, lr: 5.74e-03 +2024-08-06 10:01:45,154 INFO [trainer.py:765] (6/8) Epoch 15, batch 1100, train_loss[loss=3.527, NarTop10Accuracy=0.6171, over 6941.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6199, over 5941.55 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,757 INFO [trainer.py:765] (6/8) Epoch 15, batch 1200, train_loss[loss=3.745, NarTop10Accuracy=0.5686, over 7454.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6217, over 5956.86 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (6/8) Epoch 15, batch 1300, train_loss[loss=3.614, NarTop10Accuracy=0.5979, over 4998.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6209, over 6033.41 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,436 INFO [trainer.py:765] (6/8) Epoch 15, batch 1400, train_loss[loss=3.489, NarTop10Accuracy=0.6058, over 6143.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6178, over 6042.88 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,042 INFO [trainer.py:765] (6/8) Epoch 15, batch 1500, train_loss[loss=3.389, NarTop10Accuracy=0.6365, over 6131.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6194, over 5984.23 frames. ], batch size: 48, lr: 5.71e-03 +2024-08-06 10:04:27,106 INFO [trainer.py:765] (6/8) Epoch 15, batch 1600, train_loss[loss=3.803, NarTop10Accuracy=0.5561, over 7033.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6208, over 5956.60 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,911 INFO [trainer.py:765] (6/8) Epoch 15, batch 1700, train_loss[loss=3.775, NarTop10Accuracy=0.561, over 6372.00 frames. ], tot_loss[loss=3.489, NarTop10Accuracy=0.6192, over 5945.71 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,733 INFO [trainer.py:765] (6/8) Epoch 15, batch 1800, train_loss[loss=3.806, NarTop10Accuracy=0.5571, over 7149.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6152, over 6006.78 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,267 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (6/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,569 INFO [trainer.py:765] (6/8) Epoch 15, batch 1900, train_loss[loss=3.732, NarTop10Accuracy=0.579, over 5884.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6142, over 6044.83 frames. ], batch size: 49, lr: 5.68e-03 +2024-08-06 10:06:23,372 INFO [trainer.py:765] (6/8) Epoch 15, batch 2000, train_loss[loss=3.581, NarTop10Accuracy=0.6005, over 6323.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 5998.86 frames. ], batch size: 49, lr: 5.67e-03 +2024-08-06 10:06:48,758 INFO [trainer.py:765] (6/8) Epoch 15, batch 2100, train_loss[loss=3.466, NarTop10Accuracy=0.609, over 4033.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6165, over 5984.49 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,171 INFO [trainer.py:765] (6/8) Epoch 15, batch 2200, train_loss[loss=3.455, NarTop10Accuracy=0.6276, over 7069.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.617, over 6027.67 frames. ], batch size: 30, lr: 5.65e-03 +2024-08-06 10:07:39,629 INFO [trainer.py:765] (6/8) Epoch 15, batch 2300, train_loss[loss=3.119, NarTop10Accuracy=0.699, over 5683.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6159, over 6058.98 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,361 INFO [trainer.py:765] (6/8) Epoch 15, batch 2400, train_loss[loss=3.71, NarTop10Accuracy=0.5702, over 5228.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.615, over 5871.67 frames. ], batch size: 7, lr: 5.64e-03 +2024-08-06 10:08:27,713 INFO [trainer.py:765] (6/8) Epoch 15, batch 2500, train_loss[loss=3.842, NarTop10Accuracy=0.546, over 4993.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6164, over 5531.96 frames. ], batch size: 6, lr: 5.63e-03 +2024-08-06 10:08:49,192 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (6/8) Epoch 16, batch 100, train_loss[loss=3.504, NarTop10Accuracy=0.6149, over 7106.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6266, over 2375.69 frames. ], batch size: 30, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (6/8) Epoch 16, batch 200, train_loss[loss=3.361, NarTop10Accuracy=0.6374, over 6828.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6285, over 3878.63 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (6/8) Epoch 16, batch 300, train_loss[loss=3.31, NarTop10Accuracy=0.6494, over 7136.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6276, over 4669.04 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,594 INFO [trainer.py:765] (6/8) Epoch 16, batch 400, train_loss[loss=3.499, NarTop10Accuracy=0.622, over 5228.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6273, over 5115.54 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,297 INFO [trainer.py:765] (6/8) Epoch 16, batch 500, train_loss[loss=3.396, NarTop10Accuracy=0.6341, over 6213.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6274, over 5408.55 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (6/8) Epoch 16, batch 600, train_loss[loss=3.272, NarTop10Accuracy=0.6764, over 5746.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6273, over 5679.24 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,949 INFO [trainer.py:765] (6/8) Epoch 16, batch 700, train_loss[loss=3.005, NarTop10Accuracy=0.7165, over 5174.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6257, over 5741.67 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,284 INFO [trainer.py:765] (6/8) Epoch 16, batch 800, train_loss[loss=3.488, NarTop10Accuracy=0.615, over 5080.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.626, over 5786.21 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,295 INFO [trainer.py:765] (6/8) Epoch 16, batch 900, train_loss[loss=3.418, NarTop10Accuracy=0.6408, over 6113.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6286, over 5813.22 frames. ], batch size: 13, lr: 5.39e-03 +2024-08-06 10:15:00,059 INFO [trainer.py:765] (6/8) Epoch 16, batch 1000, train_loss[loss=3.757, NarTop10Accuracy=0.5592, over 6711.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.623, over 5928.04 frames. ], batch size: 14, lr: 5.38e-03 +2024-08-06 10:15:30,508 INFO [trainer.py:765] (6/8) Epoch 16, batch 1100, train_loss[loss=3.408, NarTop10Accuracy=0.6327, over 6917.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.621, over 5952.68 frames. ], batch size: 17, lr: 5.38e-03 +2024-08-06 10:16:11,383 INFO [trainer.py:765] (6/8) Epoch 16, batch 1200, train_loss[loss=3.599, NarTop10Accuracy=0.6034, over 7299.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6209, over 5939.36 frames. ], batch size: 30, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (6/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,041 INFO [trainer.py:765] (6/8) Epoch 16, batch 1300, train_loss[loss=3.572, NarTop10Accuracy=0.5967, over 5030.00 frames. ], tot_loss[loss=3.467, NarTop10Accuracy=0.6238, over 6018.49 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,374 INFO [trainer.py:765] (6/8) Epoch 16, batch 1400, train_loss[loss=3.209, NarTop10Accuracy=0.6745, over 6202.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6233, over 6040.36 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,352 INFO [trainer.py:765] (6/8) Epoch 16, batch 1500, train_loss[loss=3.506, NarTop10Accuracy=0.6199, over 6002.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6232, over 5965.16 frames. ], batch size: 48, lr: 5.35e-03 +2024-08-06 10:18:30,468 INFO [trainer.py:765] (6/8) Epoch 16, batch 1600, train_loss[loss=3.555, NarTop10Accuracy=0.6002, over 7120.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6204, over 5949.27 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,271 INFO [trainer.py:765] (6/8) Epoch 16, batch 1700, train_loss[loss=3.847, NarTop10Accuracy=0.5447, over 6234.00 frames. ], tot_loss[loss=3.473, NarTop10Accuracy=0.6227, over 5923.89 frames. ], batch size: 13, lr: 5.34e-03 +2024-08-06 10:19:23,979 INFO [trainer.py:765] (6/8) Epoch 16, batch 1800, train_loss[loss=3.792, NarTop10Accuracy=0.5669, over 7083.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6211, over 6005.67 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,772 INFO [trainer.py:765] (6/8) Epoch 16, batch 1900, train_loss[loss=3.672, NarTop10Accuracy=0.5808, over 5675.00 frames. ], tot_loss[loss=3.486, NarTop10Accuracy=0.6201, over 6036.93 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:16,601 INFO [trainer.py:765] (6/8) Epoch 16, batch 2000, train_loss[loss=3.561, NarTop10Accuracy=0.6029, over 6301.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6189, over 6005.12 frames. ], batch size: 49, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (6/8) Epoch 16, batch 2100, train_loss[loss=3.678, NarTop10Accuracy=0.5899, over 4846.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6181, over 6001.57 frames. ], batch size: 5, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (6/8) Epoch 16, batch 2200, train_loss[loss=3.437, NarTop10Accuracy=0.6245, over 7016.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6182, over 6025.21 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:36,082 INFO [trainer.py:765] (6/8) Epoch 16, batch 2300, train_loss[loss=3.292, NarTop10Accuracy=0.6509, over 5715.00 frames. ], tot_loss[loss=3.5, NarTop10Accuracy=0.6174, over 6041.78 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,907 INFO [trainer.py:765] (6/8) Epoch 16, batch 2400, train_loss[loss=3.284, NarTop10Accuracy=0.6636, over 5142.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6179, over 5856.77 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,291 INFO [trainer.py:765] (6/8) Epoch 16, batch 2500, train_loss[loss=3.144, NarTop10Accuracy=0.6852, over 4968.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6229, over 5523.58 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,364 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:23:45,727 INFO [trainer.py:765] (6/8) Epoch 17, batch 100, train_loss[loss=3.461, NarTop10Accuracy=0.6226, over 7636.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6335, over 2368.94 frames. ], batch size: 32, lr: 5.12e-03 +2024-08-06 10:24:19,033 INFO [trainer.py:765] (6/8) Epoch 17, batch 200, train_loss[loss=3.273, NarTop10Accuracy=0.671, over 6663.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6331, over 3870.67 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,440 INFO [trainer.py:765] (6/8) Epoch 17, batch 300, train_loss[loss=3.606, NarTop10Accuracy=0.5931, over 7015.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6313, over 4664.05 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,013 INFO [trainer.py:765] (6/8) Epoch 17, batch 400, train_loss[loss=3.697, NarTop10Accuracy=0.5713, over 5265.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6317, over 5118.27 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,606 INFO [trainer.py:765] (6/8) Epoch 17, batch 500, train_loss[loss=3.531, NarTop10Accuracy=0.606, over 6193.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6303, over 5416.25 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,755 INFO [trainer.py:765] (6/8) Epoch 17, batch 600, train_loss[loss=3.477, NarTop10Accuracy=0.6241, over 5868.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6307, over 5694.31 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (6/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,072 INFO [trainer.py:765] (6/8) Epoch 17, batch 700, train_loss[loss=3.338, NarTop10Accuracy=0.6578, over 5202.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.628, over 5758.16 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (6/8) Epoch 17, batch 800, train_loss[loss=2.96, NarTop10Accuracy=0.7266, over 5271.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6276, over 5823.56 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,839 INFO [trainer.py:765] (6/8) Epoch 17, batch 900, train_loss[loss=3.131, NarTop10Accuracy=0.6825, over 6329.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6265, over 5824.26 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,684 INFO [trainer.py:765] (6/8) Epoch 17, batch 1000, train_loss[loss=3.489, NarTop10Accuracy=0.6261, over 6658.00 frames. ], tot_loss[loss=3.45, NarTop10Accuracy=0.6271, over 5919.11 frames. ], batch size: 14, lr: 5.06e-03 +2024-08-06 10:29:36,659 INFO [trainer.py:765] (6/8) Epoch 17, batch 1100, train_loss[loss=3.334, NarTop10Accuracy=0.6567, over 6960.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6248, over 5954.55 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,241 INFO [trainer.py:765] (6/8) Epoch 17, batch 1200, train_loss[loss=3.487, NarTop10Accuracy=0.6245, over 6978.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.625, over 5942.71 frames. ], batch size: 30, lr: 5.05e-03 +2024-08-06 10:30:47,102 INFO [trainer.py:765] (6/8) Epoch 17, batch 1300, train_loss[loss=3.549, NarTop10Accuracy=0.6139, over 5069.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6242, over 6019.49 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,893 INFO [trainer.py:765] (6/8) Epoch 17, batch 1400, train_loss[loss=3.256, NarTop10Accuracy=0.6694, over 6219.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6245, over 6024.21 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,401 INFO [trainer.py:765] (6/8) Epoch 17, batch 1500, train_loss[loss=3.549, NarTop10Accuracy=0.6141, over 6278.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6262, over 5971.73 frames. ], batch size: 50, lr: 5.03e-03 +2024-08-06 10:32:19,401 INFO [trainer.py:765] (6/8) Epoch 17, batch 1600, train_loss[loss=3.415, NarTop10Accuracy=0.639, over 7215.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.6247, over 5952.59 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,394 INFO [trainer.py:765] (6/8) Epoch 17, batch 1700, train_loss[loss=3.716, NarTop10Accuracy=0.5767, over 6734.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6222, over 5933.56 frames. ], batch size: 14, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (6/8) Epoch 17, batch 1800, train_loss[loss=3.801, NarTop10Accuracy=0.5542, over 7208.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6214, over 5995.84 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,597 INFO [trainer.py:765] (6/8) Epoch 17, batch 1900, train_loss[loss=3.883, NarTop10Accuracy=0.5381, over 6088.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6197, over 6025.73 frames. 
], batch size: 49, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (6/8) Epoch 17, batch 2000, train_loss[loss=3.839, NarTop10Accuracy=0.5422, over 5919.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6198, over 6005.79 frames. ], batch size: 49, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (6/8) Epoch 17, batch 2100, train_loss[loss=3.024, NarTop10Accuracy=0.6872, over 4731.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6175, over 6004.73 frames. ], batch size: 5, lr: 5.00e-03 +2024-08-06 10:35:00,245 INFO [trainer.py:765] (6/8) Epoch 17, batch 2200, train_loss[loss=3.306, NarTop10Accuracy=0.6461, over 7265.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6207, over 6042.58 frames. ], batch size: 30, lr: 4.99e-03 +2024-08-06 10:35:25,733 INFO [trainer.py:765] (6/8) Epoch 17, batch 2300, train_loss[loss=3.391, NarTop10Accuracy=0.6441, over 5888.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6194, over 6084.10 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (6/8) Epoch 17, batch 2400, train_loss[loss=3.824, NarTop10Accuracy=0.5499, over 5063.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.6164, over 5893.89 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,104 INFO [trainer.py:765] (6/8) Epoch 17, batch 2500, train_loss[loss=3.645, NarTop10Accuracy=0.5864, over 4989.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6217, over 5535.46 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,390 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:37:32,052 INFO [trainer.py:765] (6/8) Epoch 18, batch 100, train_loss[loss=3.377, NarTop10Accuracy=0.6535, over 7295.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6347, over 2379.83 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 10:37:39,163 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,086 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:37:49,685 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (6/8) Epoch 18, batch 200, train_loss[loss=3.418, NarTop10Accuracy=0.623, over 6775.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6348, over 3870.94 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,199 INFO [trainer.py:765] (6/8) Epoch 18, batch 300, train_loss[loss=3.442, NarTop10Accuracy=0.6355, over 7244.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6353, over 4674.08 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (6/8) Epoch 18, batch 400, train_loss[loss=3.346, NarTop10Accuracy=0.6437, over 5086.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6335, over 5119.53 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,104 INFO [trainer.py:765] (6/8) Epoch 18, batch 500, train_loss[loss=3.339, NarTop10Accuracy=0.6481, over 6068.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6352, over 5390.62 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (6/8) Epoch 18, batch 600, train_loss[loss=3.742, NarTop10Accuracy=0.5727, over 5720.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6336, over 5663.21 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,144 INFO [trainer.py:765] (6/8) Epoch 18, batch 700, train_loss[loss=3.177, NarTop10Accuracy=0.6677, over 5064.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6301, over 5747.49 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (6/8) Epoch 18, batch 800, train_loss[loss=3.517, NarTop10Accuracy=0.6187, over 4977.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6315, over 5792.98 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (6/8) Epoch 18, batch 900, train_loss[loss=3.431, NarTop10Accuracy=0.6161, over 6191.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6303, over 5801.41 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:42:46,703 INFO [trainer.py:765] (6/8) Epoch 18, batch 1000, train_loss[loss=3.248, NarTop10Accuracy=0.6654, over 6188.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6308, over 5916.71 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,183 INFO [trainer.py:765] (6/8) Epoch 18, batch 1100, train_loss[loss=3.532, NarTop10Accuracy=0.6104, over 6793.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.628, over 5948.46 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (6/8) Epoch 18, batch 1200, train_loss[loss=3.432, NarTop10Accuracy=0.6337, over 7288.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.6273, over 5946.94 frames. ], batch size: 30, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (6/8) Epoch 18, batch 1300, train_loss[loss=3.46, NarTop10Accuracy=0.6281, over 4980.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6293, over 6026.78 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,238 INFO [trainer.py:765] (6/8) Epoch 18, batch 1400, train_loss[loss=3.339, NarTop10Accuracy=0.6507, over 6342.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6256, over 6042.05 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,976 INFO [trainer.py:765] (6/8) Epoch 18, batch 1500, train_loss[loss=3.842, NarTop10Accuracy=0.5462, over 6310.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6256, over 5973.76 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (6/8) Epoch 18, batch 1600, train_loss[loss=3.215, NarTop10Accuracy=0.6672, over 7196.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6252, over 5961.44 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,859 INFO [trainer.py:765] (6/8) Epoch 18, batch 1700, train_loss[loss=3.687, NarTop10Accuracy=0.5714, over 6694.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6256, over 5957.80 frames. ], batch size: 14, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (6/8) Epoch 18, batch 1800, train_loss[loss=3.564, NarTop10Accuracy=0.6025, over 7373.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6237, over 6011.22 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (6/8) Epoch 18, batch 1900, train_loss[loss=3.668, NarTop10Accuracy=0.5779, over 5616.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.621, over 6048.15 frames. ], batch size: 49, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (6/8) Epoch 18, batch 2000, train_loss[loss=3.447, NarTop10Accuracy=0.6295, over 6278.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6217, over 6019.95 frames. 
], batch size: 49, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (6/8) Epoch 18, batch 2100, train_loss[loss=3.167, NarTop10Accuracy=0.6708, over 3822.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6223, over 6002.74 frames. ], batch size: 4, lr: 4.72e-03 +2024-08-06 10:48:24,748 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (6/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:48:35,535 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,096 INFO [trainer.py:765] (6/8) Epoch 18, batch 2200, train_loss[loss=3.436, NarTop10Accuracy=0.628, over 7156.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6216, over 6039.08 frames. ], batch size: 30, lr: 4.72e-03 +2024-08-06 10:49:21,521 INFO [trainer.py:765] (6/8) Epoch 18, batch 2300, train_loss[loss=3.237, NarTop10Accuracy=0.6566, over 5938.00 frames. ], tot_loss[loss=3.482, NarTop10Accuracy=0.6211, over 6073.88 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,257 INFO [trainer.py:765] (6/8) Epoch 18, batch 2400, train_loss[loss=3.15, NarTop10Accuracy=0.6879, over 5209.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6204, over 5885.12 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,708 INFO [trainer.py:765] (6/8) Epoch 18, batch 2500, train_loss[loss=3.447, NarTop10Accuracy=0.6289, over 4953.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6251, over 5537.24 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:31,107 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (6/8) Epoch 19, batch 100, train_loss[loss=3.375, NarTop10Accuracy=0.6551, over 7155.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6364, over 2359.88 frames. ], batch size: 31, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (6/8) Epoch 19, batch 200, train_loss[loss=3.82, NarTop10Accuracy=0.5501, over 6842.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6361, over 3864.20 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,032 INFO [trainer.py:765] (6/8) Epoch 19, batch 300, train_loss[loss=3.48, NarTop10Accuracy=0.6206, over 7012.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6383, over 4659.64 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,830 INFO [trainer.py:765] (6/8) Epoch 19, batch 400, train_loss[loss=3.291, NarTop10Accuracy=0.6718, over 5207.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6359, over 5126.34 frames. ], batch size: 7, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (6/8) Epoch 19, batch 500, train_loss[loss=3.28, NarTop10Accuracy=0.663, over 6290.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6361, over 5393.44 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,601 INFO [trainer.py:765] (6/8) Epoch 19, batch 600, train_loss[loss=3.445, NarTop10Accuracy=0.6383, over 5755.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6346, over 5670.16 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (6/8) Epoch 19, batch 700, train_loss[loss=3.443, NarTop10Accuracy=0.6293, over 5083.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6339, over 5746.21 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (6/8) Epoch 19, batch 800, train_loss[loss=3.174, NarTop10Accuracy=0.6755, over 5217.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6337, over 5790.42 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,239 INFO [trainer.py:765] (6/8) Epoch 19, batch 900, train_loss[loss=3.587, NarTop10Accuracy=0.6176, over 6325.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6327, over 5820.40 frames. ], batch size: 13, lr: 4.53e-03 +2024-08-06 10:56:38,300 INFO [trainer.py:765] (6/8) Epoch 19, batch 1000, train_loss[loss=3.294, NarTop10Accuracy=0.6682, over 6202.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6314, over 5910.44 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,188 INFO [trainer.py:765] (6/8) Epoch 19, batch 1100, train_loss[loss=3.24, NarTop10Accuracy=0.6682, over 7120.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6291, over 5959.48 frames. ], batch size: 18, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (6/8) Epoch 19, batch 1200, train_loss[loss=3.347, NarTop10Accuracy=0.6526, over 7050.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6276, over 5959.09 frames. ], batch size: 30, lr: 4.51e-03 +2024-08-06 10:58:23,901 INFO [trainer.py:765] (6/8) Epoch 19, batch 1300, train_loss[loss=3.101, NarTop10Accuracy=0.7057, over 5020.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6266, over 6019.16 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,029 INFO [trainer.py:765] (6/8) Epoch 19, batch 1400, train_loss[loss=3.519, NarTop10Accuracy=0.6015, over 6129.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6262, over 6031.60 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (6/8) Epoch 19, batch 1500, train_loss[loss=3.69, NarTop10Accuracy=0.575, over 6012.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6267, over 5966.90 frames. ], batch size: 49, lr: 4.50e-03 +2024-08-06 10:59:40,832 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (6/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,899 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (6/8) Epoch 19, batch 1600, train_loss[loss=3.679, NarTop10Accuracy=0.5727, over 7169.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6264, over 5957.02 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (6/8) Epoch 19, batch 1700, train_loss[loss=3.721, NarTop10Accuracy=0.5762, over 6270.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6252, over 5946.39 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,257 INFO [trainer.py:765] (6/8) Epoch 19, batch 1800, train_loss[loss=3.252, NarTop10Accuracy=0.6581, over 7083.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6278, over 6009.40 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,930 INFO [trainer.py:765] (6/8) Epoch 19, batch 1900, train_loss[loss=3.532, NarTop10Accuracy=0.6155, over 6317.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6248, over 6038.90 frames. 
], batch size: 51, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (6/8) Epoch 19, batch 2000, train_loss[loss=3.413, NarTop10Accuracy=0.6371, over 6347.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6257, over 6021.87 frames. ], batch size: 49, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (6/8) Epoch 19, batch 2100, train_loss[loss=3.439, NarTop10Accuracy=0.6376, over 3941.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6257, over 6010.17 frames. ], batch size: 4, lr: 4.47e-03 +2024-08-06 11:02:45,695 INFO [trainer.py:765] (6/8) Epoch 19, batch 2200, train_loss[loss=3.542, NarTop10Accuracy=0.6132, over 7433.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6255, over 6053.59 frames. ], batch size: 31, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (6/8) Epoch 19, batch 2300, train_loss[loss=3.295, NarTop10Accuracy=0.6582, over 5878.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.6252, over 6082.52 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,951 INFO [trainer.py:765] (6/8) Epoch 19, batch 2400, train_loss[loss=3.081, NarTop10Accuracy=0.6919, over 5321.00 frames. ], tot_loss[loss=3.472, NarTop10Accuracy=0.6232, over 5865.44 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (6/8) Epoch 19, batch 2500, train_loss[loss=3.783, NarTop10Accuracy=0.5564, over 5126.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6294, over 5518.04 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:23,805 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:05:26,561 INFO [trainer.py:765] (6/8) Epoch 20, batch 100, train_loss[loss=3.536, NarTop10Accuracy=0.612, over 6875.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6394, over 2385.44 frames. ], batch size: 30, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (6/8) Epoch 20, batch 200, train_loss[loss=3.364, NarTop10Accuracy=0.6443, over 6968.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6408, over 3868.85 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,634 INFO [trainer.py:765] (6/8) Epoch 20, batch 300, train_loss[loss=3.21, NarTop10Accuracy=0.6755, over 6962.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6392, over 4659.50 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,396 INFO [trainer.py:765] (6/8) Epoch 20, batch 400, train_loss[loss=3.128, NarTop10Accuracy=0.696, over 5105.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6392, over 5113.47 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,166 INFO [trainer.py:765] (6/8) Epoch 20, batch 500, train_loss[loss=3.216, NarTop10Accuracy=0.6785, over 6155.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5411.23 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,568 INFO [trainer.py:765] (6/8) Epoch 20, batch 600, train_loss[loss=2.981, NarTop10Accuracy=0.7244, over 5824.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6414, over 5671.09 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,275 INFO [trainer.py:765] (6/8) Epoch 20, batch 700, train_loss[loss=3.221, NarTop10Accuracy=0.6714, over 4932.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6395, over 5750.25 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,427 INFO [trainer.py:765] (6/8) Epoch 20, batch 800, train_loss[loss=3.4, NarTop10Accuracy=0.6284, over 4972.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 5808.47 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,513 INFO [trainer.py:765] (6/8) Epoch 20, batch 900, train_loss[loss=3.389, NarTop10Accuracy=0.632, over 6166.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6344, over 5825.03 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,200 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (6/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,966 INFO [trainer.py:765] (6/8) Epoch 20, batch 1000, train_loss[loss=3.371, NarTop10Accuracy=0.6421, over 6177.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6319, over 5920.81 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,023 INFO [trainer.py:765] (6/8) Epoch 20, batch 1100, train_loss[loss=3.359, NarTop10Accuracy=0.6448, over 6764.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6287, over 5952.79 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,394 INFO [trainer.py:765] (6/8) Epoch 20, batch 1200, train_loss[loss=3.312, NarTop10Accuracy=0.6499, over 7360.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6289, over 5961.27 frames. ], batch size: 31, lr: 4.28e-03 +2024-08-06 11:12:30,753 INFO [trainer.py:765] (6/8) Epoch 20, batch 1300, train_loss[loss=3.683, NarTop10Accuracy=0.577, over 5147.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6315, over 6026.15 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,292 INFO [trainer.py:765] (6/8) Epoch 20, batch 1400, train_loss[loss=3.53, NarTop10Accuracy=0.6023, over 6261.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6292, over 6039.45 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,990 INFO [trainer.py:765] (6/8) Epoch 20, batch 1500, train_loss[loss=3.385, NarTop10Accuracy=0.6422, over 5910.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6284, over 5963.55 frames. ], batch size: 49, lr: 4.27e-03 +2024-08-06 11:14:07,052 INFO [trainer.py:765] (6/8) Epoch 20, batch 1600, train_loss[loss=3.309, NarTop10Accuracy=0.6529, over 7364.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.627, over 5944.08 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,911 INFO [trainer.py:765] (6/8) Epoch 20, batch 1700, train_loss[loss=3.33, NarTop10Accuracy=0.6538, over 6145.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6284, over 5933.37 frames. ], batch size: 13, lr: 4.26e-03 +2024-08-06 11:15:00,591 INFO [trainer.py:765] (6/8) Epoch 20, batch 1800, train_loss[loss=3.241, NarTop10Accuracy=0.6679, over 7169.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6274, over 6005.29 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,277 INFO [trainer.py:765] (6/8) Epoch 20, batch 1900, train_loss[loss=3.386, NarTop10Accuracy=0.6451, over 6339.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6258, over 6037.39 frames. ], batch size: 50, lr: 4.26e-03 +2024-08-06 11:15:56,439 INFO [trainer.py:765] (6/8) Epoch 20, batch 2000, train_loss[loss=3.391, NarTop10Accuracy=0.6376, over 6113.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6265, over 6018.98 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,959 INFO [trainer.py:765] (6/8) Epoch 20, batch 2100, train_loss[loss=2.884, NarTop10Accuracy=0.7257, over 3919.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6249, over 6010.61 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 11:16:47,406 INFO [trainer.py:765] (6/8) Epoch 20, batch 2200, train_loss[loss=3.358, NarTop10Accuracy=0.6349, over 7392.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6281, over 6055.25 frames. ], batch size: 31, lr: 4.24e-03 +2024-08-06 11:17:12,909 INFO [trainer.py:765] (6/8) Epoch 20, batch 2300, train_loss[loss=3.596, NarTop10Accuracy=0.5989, over 5666.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6253, over 6073.90 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,715 INFO [trainer.py:765] (6/8) Epoch 20, batch 2400, train_loss[loss=3.396, NarTop10Accuracy=0.6384, over 5076.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6249, over 5886.35 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,248 INFO [trainer.py:765] (6/8) Epoch 20, batch 2500, train_loss[loss=3.424, NarTop10Accuracy=0.6209, over 4966.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6305, over 5538.04 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,085 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:19:21,460 INFO [trainer.py:765] (6/8) Epoch 21, batch 100, train_loss[loss=3.333, NarTop10Accuracy=0.653, over 7198.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6384, over 2373.83 frames. ], batch size: 31, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (6/8) Epoch 21, batch 200, train_loss[loss=3.432, NarTop10Accuracy=0.6282, over 6858.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6365, over 3866.92 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,598 INFO [trainer.py:765] (6/8) Epoch 21, batch 300, train_loss[loss=3.573, NarTop10Accuracy=0.6031, over 7040.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6402, over 4672.35 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,241 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (6/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (6/8) Epoch 21, batch 400, train_loss[loss=3.719, NarTop10Accuracy=0.5803, over 5273.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6392, over 5117.67 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,569 INFO [trainer.py:765] (6/8) Epoch 21, batch 500, train_loss[loss=3.258, NarTop10Accuracy=0.661, over 6295.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6427, over 5400.66 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,237 INFO [trainer.py:765] (6/8) Epoch 21, batch 600, train_loss[loss=3.596, NarTop10Accuracy=0.6072, over 5609.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6394, over 5667.36 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,842 INFO [trainer.py:765] (6/8) Epoch 21, batch 700, train_loss[loss=3.146, NarTop10Accuracy=0.6814, over 4981.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6375, over 5731.10 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,075 INFO [trainer.py:765] (6/8) Epoch 21, batch 800, train_loss[loss=3.395, NarTop10Accuracy=0.6398, over 5062.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5800.54 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (6/8) Epoch 21, batch 900, train_loss[loss=3.584, NarTop10Accuracy=0.5984, over 6736.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6372, over 5813.52 frames. ], batch size: 14, lr: 4.09e-03 +2024-08-06 11:24:37,089 INFO [trainer.py:765] (6/8) Epoch 21, batch 1000, train_loss[loss=3.394, NarTop10Accuracy=0.6423, over 6196.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6328, over 5913.87 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:25:16,427 INFO [trainer.py:765] (6/8) Epoch 21, batch 1100, train_loss[loss=3.516, NarTop10Accuracy=0.5949, over 6981.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6315, over 5944.57 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,740 INFO [trainer.py:765] (6/8) Epoch 21, batch 1200, train_loss[loss=3.379, NarTop10Accuracy=0.6455, over 7549.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.635, over 5947.31 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 11:26:23,057 INFO [trainer.py:765] (6/8) Epoch 21, batch 1300, train_loss[loss=3.902, NarTop10Accuracy=0.5456, over 4977.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6356, over 6026.12 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,081 INFO [trainer.py:765] (6/8) Epoch 21, batch 1400, train_loss[loss=3.5, NarTop10Accuracy=0.6322, over 6149.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6317, over 6034.55 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (6/8) Epoch 21, batch 1500, train_loss[loss=3.846, NarTop10Accuracy=0.5419, over 6600.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.6306, over 5980.59 frames. ], batch size: 49, lr: 4.07e-03 +2024-08-06 11:28:03,315 INFO [trainer.py:765] (6/8) Epoch 21, batch 1600, train_loss[loss=3.371, NarTop10Accuracy=0.65, over 7154.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6302, over 5958.06 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,105 INFO [trainer.py:765] (6/8) Epoch 21, batch 1700, train_loss[loss=3.701, NarTop10Accuracy=0.578, over 6283.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6295, over 5940.32 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,641 INFO [trainer.py:765] (6/8) Epoch 21, batch 1800, train_loss[loss=3.455, NarTop10Accuracy=0.6308, over 7160.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6298, over 5991.17 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,198 INFO [trainer.py:765] (6/8) Epoch 21, batch 1900, train_loss[loss=3.612, NarTop10Accuracy=0.592, over 5805.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6294, over 6045.47 frames. ], batch size: 53, lr: 4.05e-03 +2024-08-06 11:29:49,028 INFO [trainer.py:765] (6/8) Epoch 21, batch 2000, train_loss[loss=3.367, NarTop10Accuracy=0.6388, over 6177.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6294, over 6016.42 frames. ], batch size: 48, lr: 4.05e-03 +2024-08-06 11:30:14,528 INFO [trainer.py:765] (6/8) Epoch 21, batch 2100, train_loss[loss=3.222, NarTop10Accuracy=0.6703, over 4934.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6287, over 6013.12 frames. 
], batch size: 5, lr: 4.04e-03 +2024-08-06 11:30:39,870 INFO [trainer.py:765] (6/8) Epoch 21, batch 2200, train_loss[loss=3.686, NarTop10Accuracy=0.5695, over 6994.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6278, over 6044.75 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,472 INFO [trainer.py:765] (6/8) Epoch 21, batch 2300, train_loss[loss=3.372, NarTop10Accuracy=0.6469, over 5769.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6286, over 6065.54 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,873 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (6/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,754 INFO [trainer.py:765] (6/8) Epoch 21, batch 2400, train_loss[loss=3.1, NarTop10Accuracy=0.6841, over 5690.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6279, over 5881.01 frames. ], batch size: 8, lr: 4.03e-03 +2024-08-06 11:32:04,057 INFO [trainer.py:765] (6/8) Epoch 21, batch 2500, train_loss[loss=3.137, NarTop10Accuracy=0.691, over 5007.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6331, over 5534.23 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,806 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (6/8) Epoch 22, batch 100, train_loss[loss=3.656, NarTop10Accuracy=0.59, over 7059.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6437, over 2373.25 frames. ], batch size: 30, lr: 3.93e-03 +2024-08-06 11:34:05,037 INFO [trainer.py:765] (6/8) Epoch 22, batch 200, train_loss[loss=3.341, NarTop10Accuracy=0.652, over 6888.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6437, over 3868.15 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (6/8) Epoch 22, batch 300, train_loss[loss=3.232, NarTop10Accuracy=0.6745, over 7021.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6456, over 4659.36 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,968 INFO [trainer.py:765] (6/8) Epoch 22, batch 400, train_loss[loss=3.244, NarTop10Accuracy=0.6643, over 5120.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6438, over 5131.69 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,509 INFO [trainer.py:765] (6/8) Epoch 22, batch 500, train_loss[loss=3.264, NarTop10Accuracy=0.668, over 6159.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6418, over 5418.79 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (6/8) Epoch 22, batch 600, train_loss[loss=3.276, NarTop10Accuracy=0.6647, over 5810.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6408, over 5692.13 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (6/8) Epoch 22, batch 700, train_loss[loss=3.497, NarTop10Accuracy=0.6203, over 5124.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6398, over 5764.64 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (6/8) Epoch 22, batch 800, train_loss[loss=3.198, NarTop10Accuracy=0.6806, over 4963.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6381, over 5793.62 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (6/8) Epoch 22, batch 900, train_loss[loss=3.16, NarTop10Accuracy=0.6995, over 6646.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.64, over 5816.68 frames. ], batch size: 14, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (6/8) Epoch 22, batch 1000, train_loss[loss=3.377, NarTop10Accuracy=0.6456, over 6274.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.639, over 5918.59 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (6/8) Epoch 22, batch 1100, train_loss[loss=3.431, NarTop10Accuracy=0.623, over 7016.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.637, over 5961.03 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (6/8) Epoch 22, batch 1200, train_loss[loss=3.349, NarTop10Accuracy=0.6488, over 6924.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6366, over 5961.23 frames. ], batch size: 30, lr: 3.89e-03 +2024-08-06 11:40:25,245 INFO [trainer.py:765] (6/8) Epoch 22, batch 1300, train_loss[loss=3.487, NarTop10Accuracy=0.6287, over 5094.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6362, over 6015.41 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,609 INFO [trainer.py:765] (6/8) Epoch 22, batch 1400, train_loss[loss=3.446, NarTop10Accuracy=0.6265, over 6145.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6354, over 6023.32 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,584 INFO [trainer.py:765] (6/8) Epoch 22, batch 1500, train_loss[loss=3.731, NarTop10Accuracy=0.5719, over 5872.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6333, over 5936.68 frames. ], batch size: 49, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (6/8) Epoch 22, batch 1600, train_loss[loss=3.406, NarTop10Accuracy=0.6472, over 7190.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6316, over 5924.10 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,464 INFO [trainer.py:765] (6/8) Epoch 22, batch 1700, train_loss[loss=3.349, NarTop10Accuracy=0.6587, over 6609.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6288, over 5928.94 frames. ], batch size: 14, lr: 3.87e-03 +2024-08-06 11:42:50,724 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (6/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,219 INFO [trainer.py:765] (6/8) Epoch 22, batch 1800, train_loss[loss=3.315, NarTop10Accuracy=0.649, over 7203.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.632, over 5993.66 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,751 INFO [trainer.py:765] (6/8) Epoch 22, batch 1900, train_loss[loss=3.599, NarTop10Accuracy=0.6055, over 6626.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6324, over 6044.12 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (6/8) Epoch 22, batch 2000, train_loss[loss=3.846, NarTop10Accuracy=0.5469, over 5931.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6324, over 6009.75 frames. 
], batch size: 49, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (6/8) Epoch 22, batch 2100, train_loss[loss=3.035, NarTop10Accuracy=0.7098, over 4829.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 5999.80 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (6/8) Epoch 22, batch 2200, train_loss[loss=3.637, NarTop10Accuracy=0.5884, over 7087.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6322, over 6047.23 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (6/8) Epoch 22, batch 2300, train_loss[loss=3.28, NarTop10Accuracy=0.6516, over 5840.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6315, over 6067.92 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (6/8) Epoch 22, batch 2400, train_loss[loss=3.442, NarTop10Accuracy=0.6329, over 5108.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6286, over 5867.56 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (6/8) Epoch 22, batch 2500, train_loss[loss=3.309, NarTop10Accuracy=0.6584, over 5094.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.634, over 5522.64 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,586 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (6/8) Epoch 23, batch 100, train_loss[loss=3.206, NarTop10Accuracy=0.6815, over 7236.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6453, over 2384.37 frames. ], batch size: 30, lr: 3.75e-03 +2024-08-06 11:47:52,036 INFO [trainer.py:765] (6/8) Epoch 23, batch 200, train_loss[loss=3.423, NarTop10Accuracy=0.6336, over 6856.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6471, over 3878.22 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,922 INFO [trainer.py:765] (6/8) Epoch 23, batch 300, train_loss[loss=3.269, NarTop10Accuracy=0.6716, over 7193.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6419, over 4665.57 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (6/8) Epoch 23, batch 400, train_loss[loss=3.252, NarTop10Accuracy=0.6789, over 5726.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.642, over 5126.33 frames. ], batch size: 8, lr: 3.74e-03 +2024-08-06 11:49:37,619 INFO [trainer.py:765] (6/8) Epoch 23, batch 500, train_loss[loss=3.556, NarTop10Accuracy=0.6057, over 6142.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6433, over 5406.50 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (6/8) Epoch 23, batch 600, train_loss[loss=3.61, NarTop10Accuracy=0.6011, over 5881.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6431, over 5677.89 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,601 INFO [trainer.py:765] (6/8) Epoch 23, batch 700, train_loss[loss=3.047, NarTop10Accuracy=0.7093, over 5095.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6407, over 5752.11 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:21,345 INFO [trainer.py:765] (6/8) Epoch 23, batch 800, train_loss[loss=3.194, NarTop10Accuracy=0.6732, over 5187.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6402, over 5829.10 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,397 INFO [trainer.py:765] (6/8) Epoch 23, batch 900, train_loss[loss=3.17, NarTop10Accuracy=0.6812, over 6263.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6424, over 5828.83 frames. 
], batch size: 13, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (6/8) Epoch 23, batch 1000, train_loss[loss=3.158, NarTop10Accuracy=0.7011, over 6703.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6411, over 5930.51 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (6/8) Epoch 23, batch 1100, train_loss[loss=3.471, NarTop10Accuracy=0.6262, over 6821.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6379, over 5972.15 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (6/8) Epoch 23, batch 1200, train_loss[loss=3.521, NarTop10Accuracy=0.6198, over 6960.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.635, over 5958.17 frames. ], batch size: 30, lr: 3.72e-03 +2024-08-06 11:53:42,826 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (6/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (6/8) Epoch 23, batch 1300, train_loss[loss=3.117, NarTop10Accuracy=0.6919, over 5112.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6357, over 6024.15 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (6/8) Epoch 23, batch 1400, train_loss[loss=3.379, NarTop10Accuracy=0.6537, over 6191.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6364, over 6037.82 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (6/8) Epoch 23, batch 1500, train_loss[loss=3.531, NarTop10Accuracy=0.6106, over 6399.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6337, over 5978.62 frames. ], batch size: 48, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (6/8) Epoch 23, batch 1600, train_loss[loss=3.412, NarTop10Accuracy=0.6377, over 7046.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6335, over 5961.71 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,202 INFO [trainer.py:765] (6/8) Epoch 23, batch 1700, train_loss[loss=3.562, NarTop10Accuracy=0.6119, over 6638.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6313, over 5945.58 frames. ], batch size: 14, lr: 3.70e-03 +2024-08-06 11:56:56,969 INFO [trainer.py:765] (6/8) Epoch 23, batch 1800, train_loss[loss=3.309, NarTop10Accuracy=0.658, over 7054.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 6009.56 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,597 INFO [trainer.py:765] (6/8) Epoch 23, batch 1900, train_loss[loss=3.484, NarTop10Accuracy=0.6257, over 6626.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6322, over 6040.74 frames. ], batch size: 50, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (6/8) Epoch 23, batch 2000, train_loss[loss=3.668, NarTop10Accuracy=0.5832, over 5679.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6326, over 6003.62 frames. ], batch size: 48, lr: 3.69e-03 +2024-08-06 11:58:14,769 INFO [trainer.py:765] (6/8) Epoch 23, batch 2100, train_loss[loss=3.673, NarTop10Accuracy=0.5894, over 4912.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6321, over 5990.03 frames. 
], batch size: 5, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (6/8) Epoch 23, batch 2200, train_loss[loss=3.679, NarTop10Accuracy=0.5948, over 7148.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6347, over 6031.00 frames. ], batch size: 30, lr: 3.69e-03 +2024-08-06 11:59:08,916 INFO [trainer.py:765] (6/8) Epoch 23, batch 2300, train_loss[loss=3.444, NarTop10Accuracy=0.6342, over 5601.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6326, over 6054.54 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,602 INFO [trainer.py:765] (6/8) Epoch 23, batch 2400, train_loss[loss=3.237, NarTop10Accuracy=0.6758, over 5289.00 frames. ], tot_loss[loss=3.447, NarTop10Accuracy=0.6288, over 5863.94 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,011 INFO [trainer.py:765] (6/8) Epoch 23, batch 2500, train_loss[loss=3.284, NarTop10Accuracy=0.6687, over 4897.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6339, over 5518.70 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:18,335 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:01:22,110 INFO [trainer.py:765] (6/8) Epoch 24, batch 100, train_loss[loss=3.464, NarTop10Accuracy=0.612, over 7281.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6429, over 2350.32 frames. ], batch size: 30, lr: 3.59e-03 +2024-08-06 12:01:51,342 INFO [trainer.py:765] (6/8) Epoch 24, batch 200, train_loss[loss=3.497, NarTop10Accuracy=0.6265, over 7085.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6461, over 3852.89 frames. ], batch size: 18, lr: 3.59e-03 +2024-08-06 12:02:23,512 INFO [trainer.py:765] (6/8) Epoch 24, batch 300, train_loss[loss=3.289, NarTop10Accuracy=0.6637, over 7139.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6463, over 4658.46 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,846 INFO [trainer.py:765] (6/8) Epoch 24, batch 400, train_loss[loss=3.034, NarTop10Accuracy=0.7095, over 5074.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6463, over 5113.37 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,256 INFO [trainer.py:765] (6/8) Epoch 24, batch 500, train_loss[loss=3.22, NarTop10Accuracy=0.6708, over 5990.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6469, over 5395.44 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,172 INFO [trainer.py:765] (6/8) Epoch 24, batch 600, train_loss[loss=3.53, NarTop10Accuracy=0.6216, over 5690.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6462, over 5677.75 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,531 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (6/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:04:23,310 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (6/8) Epoch 24, batch 700, train_loss[loss=3.202, NarTop10Accuracy=0.6823, over 5137.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6451, over 5743.77 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,274 INFO [trainer.py:765] (6/8) Epoch 24, batch 800, train_loss[loss=3.502, NarTop10Accuracy=0.6291, over 5195.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6433, over 5793.67 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,753 INFO [trainer.py:765] (6/8) Epoch 24, batch 900, train_loss[loss=3.532, NarTop10Accuracy=0.6123, over 6153.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6416, over 5806.79 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,812 INFO [trainer.py:765] (6/8) Epoch 24, batch 1000, train_loss[loss=3.122, NarTop10Accuracy=0.6983, over 6653.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6391, over 5921.60 frames. ], batch size: 14, lr: 3.57e-03 +2024-08-06 12:07:09,039 INFO [trainer.py:765] (6/8) Epoch 24, batch 1100, train_loss[loss=3.282, NarTop10Accuracy=0.654, over 6720.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.638, over 5953.26 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,133 INFO [trainer.py:765] (6/8) Epoch 24, batch 1200, train_loss[loss=3.387, NarTop10Accuracy=0.6427, over 7360.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6381, over 5951.50 frames. ], batch size: 31, lr: 3.56e-03 +2024-08-06 12:08:20,730 INFO [trainer.py:765] (6/8) Epoch 24, batch 1300, train_loss[loss=3.181, NarTop10Accuracy=0.6851, over 5093.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6372, over 6024.92 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,065 INFO [trainer.py:765] (6/8) Epoch 24, batch 1400, train_loss[loss=3.558, NarTop10Accuracy=0.6117, over 6553.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6344, over 6055.63 frames. ], batch size: 12, lr: 3.56e-03 +2024-08-06 12:09:24,337 INFO [trainer.py:765] (6/8) Epoch 24, batch 1500, train_loss[loss=3.742, NarTop10Accuracy=0.5731, over 6011.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6332, over 5984.44 frames. ], batch size: 49, lr: 3.55e-03 +2024-08-06 12:09:52,524 INFO [trainer.py:765] (6/8) Epoch 24, batch 1600, train_loss[loss=3.359, NarTop10Accuracy=0.6477, over 7219.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6329, over 5959.83 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,545 INFO [trainer.py:765] (6/8) Epoch 24, batch 1700, train_loss[loss=3.595, NarTop10Accuracy=0.5985, over 6260.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.6313, over 5945.37 frames. ], batch size: 13, lr: 3.55e-03 +2024-08-06 12:10:49,272 INFO [trainer.py:765] (6/8) Epoch 24, batch 1800, train_loss[loss=3.199, NarTop10Accuracy=0.6725, over 7236.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.633, over 6016.86 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,848 INFO [trainer.py:765] (6/8) Epoch 24, batch 1900, train_loss[loss=3.434, NarTop10Accuracy=0.6372, over 5973.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6335, over 6041.86 frames. ], batch size: 48, lr: 3.54e-03 +2024-08-06 12:11:41,665 INFO [trainer.py:765] (6/8) Epoch 24, batch 2000, train_loss[loss=3.448, NarTop10Accuracy=0.6362, over 6190.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6338, over 6020.61 frames. ], batch size: 48, lr: 3.54e-03 +2024-08-06 12:12:07,102 INFO [trainer.py:765] (6/8) Epoch 24, batch 2100, train_loss[loss=3.194, NarTop10Accuracy=0.6735, over 3975.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6355, over 6004.26 frames. ], batch size: 4, lr: 3.54e-03 +2024-08-06 12:12:33,372 INFO [trainer.py:765] (6/8) Epoch 24, batch 2200, train_loss[loss=3.506, NarTop10Accuracy=0.6155, over 7022.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6351, over 6039.24 frames. 
], batch size: 30, lr: 3.53e-03 +2024-08-06 12:12:58,771 INFO [trainer.py:765] (6/8) Epoch 24, batch 2300, train_loss[loss=3.629, NarTop10Accuracy=0.5887, over 5706.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6334, over 6067.52 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,486 INFO [trainer.py:765] (6/8) Epoch 24, batch 2400, train_loss[loss=3.378, NarTop10Accuracy=0.6556, over 5146.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6314, over 5885.34 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 12:13:47,004 INFO [trainer.py:765] (6/8) Epoch 24, batch 2500, train_loss[loss=3.234, NarTop10Accuracy=0.6766, over 5061.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6381, over 5546.72 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:08,096 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:14:50,195 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (6/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (6/8) Epoch 25, batch 100, train_loss[loss=3.224, NarTop10Accuracy=0.684, over 7169.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6515, over 2383.60 frames. ], batch size: 30, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (6/8) Epoch 25, batch 200, train_loss[loss=3.322, NarTop10Accuracy=0.6557, over 6856.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.651, over 3858.75 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (6/8) Epoch 25, batch 300, train_loss[loss=3.389, NarTop10Accuracy=0.6522, over 7048.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6481, over 4670.34 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,163 INFO [trainer.py:765] (6/8) Epoch 25, batch 400, train_loss[loss=3.275, NarTop10Accuracy=0.6645, over 5238.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6468, over 5121.80 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (6/8) Epoch 25, batch 500, train_loss[loss=3.179, NarTop10Accuracy=0.6848, over 6155.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6476, over 5399.21 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (6/8) Epoch 25, batch 600, train_loss[loss=3.221, NarTop10Accuracy=0.6729, over 5856.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6461, over 5663.79 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,598 INFO [trainer.py:765] (6/8) Epoch 25, batch 700, train_loss[loss=3.297, NarTop10Accuracy=0.6633, over 4911.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6458, over 5743.86 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,015 INFO [trainer.py:765] (6/8) Epoch 25, batch 800, train_loss[loss=3.141, NarTop10Accuracy=0.6988, over 5152.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6419, over 5824.09 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,558 INFO [trainer.py:765] (6/8) Epoch 25, batch 900, train_loss[loss=3.227, NarTop10Accuracy=0.6691, over 6388.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 5831.87 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,876 INFO [trainer.py:765] (6/8) Epoch 25, batch 1000, train_loss[loss=3.274, NarTop10Accuracy=0.6693, over 6273.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6413, over 5933.25 frames. ], batch size: 13, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (6/8) Epoch 25, batch 1100, train_loss[loss=3.445, NarTop10Accuracy=0.6236, over 6978.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6387, over 5963.73 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,637 INFO [trainer.py:765] (6/8) Epoch 25, batch 1200, train_loss[loss=3.334, NarTop10Accuracy=0.6459, over 7437.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6418, over 5959.28 frames. ], batch size: 31, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (6/8) Epoch 25, batch 1300, train_loss[loss=3.593, NarTop10Accuracy=0.5944, over 5090.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6421, over 6024.09 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (6/8) Epoch 25, batch 1400, train_loss[loss=3.601, NarTop10Accuracy=0.5947, over 6124.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6427, over 6035.94 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (6/8) Epoch 25, batch 1500, train_loss[loss=3.745, NarTop10Accuracy=0.5727, over 6134.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6402, over 5979.44 frames. ], batch size: 49, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (6/8) Epoch 25, batch 1600, train_loss[loss=3.378, NarTop10Accuracy=0.6422, over 7286.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6381, over 5948.16 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (6/8) Epoch 25, batch 1700, train_loss[loss=3.452, NarTop10Accuracy=0.6333, over 6712.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.635, over 5938.10 frames. ], batch size: 14, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (6/8) Epoch 25, batch 1800, train_loss[loss=3.529, NarTop10Accuracy=0.6142, over 7038.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6331, over 5998.91 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (6/8) Epoch 25, batch 1900, train_loss[loss=3.628, NarTop10Accuracy=0.5953, over 6658.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6317, over 6059.05 frames. ], batch size: 52, lr: 3.40e-03 +2024-08-06 12:25:35,711 INFO [trainer.py:765] (6/8) Epoch 25, batch 2000, train_loss[loss=3.65, NarTop10Accuracy=0.5913, over 5740.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6355, over 6014.07 frames. ], batch size: 48, lr: 3.40e-03 +2024-08-06 12:25:47,855 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (6/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:25:59,343 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,225 INFO [trainer.py:765] (6/8) Epoch 25, batch 2100, train_loss[loss=3.493, NarTop10Accuracy=0.6171, over 3966.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6348, over 6001.00 frames. 
], batch size: 4, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (6/8) Epoch 25, batch 2200, train_loss[loss=3.569, NarTop10Accuracy=0.6031, over 7501.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6342, over 6051.32 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (6/8) Epoch 25, batch 2300, train_loss[loss=3.376, NarTop10Accuracy=0.6495, over 5854.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.633, over 6069.22 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,150 INFO [trainer.py:765] (6/8) Epoch 25, batch 2400, train_loss[loss=3.185, NarTop10Accuracy=0.6809, over 5138.00 frames. ], tot_loss[loss=3.427, NarTop10Accuracy=0.6322, over 5881.46 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (6/8) Epoch 25, batch 2500, train_loss[loss=3.363, NarTop10Accuracy=0.6499, over 5020.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6369, over 5550.73 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:13,173 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (6/8) Epoch 26, batch 100, train_loss[loss=3.493, NarTop10Accuracy=0.6156, over 6995.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6496, over 2370.13 frames. ], batch size: 30, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (6/8) Epoch 26, batch 200, train_loss[loss=3.114, NarTop10Accuracy=0.688, over 6892.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6454, over 3879.80 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (6/8) Epoch 26, batch 300, train_loss[loss=3.222, NarTop10Accuracy=0.6707, over 7175.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6462, over 4679.40 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,509 INFO [trainer.py:765] (6/8) Epoch 26, batch 400, train_loss[loss=3.262, NarTop10Accuracy=0.6735, over 5113.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6464, over 5136.28 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (6/8) Epoch 26, batch 500, train_loss[loss=3.308, NarTop10Accuracy=0.655, over 6259.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6483, over 5410.84 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (6/8) Epoch 26, batch 600, train_loss[loss=3.47, NarTop10Accuracy=0.5992, over 5760.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6455, over 5678.06 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (6/8) Epoch 26, batch 700, train_loss[loss=3.342, NarTop10Accuracy=0.6534, over 5078.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6436, over 5742.95 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,809 INFO [trainer.py:765] (6/8) Epoch 26, batch 800, train_loss[loss=3.254, NarTop10Accuracy=0.6707, over 5158.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6437, over 5792.24 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (6/8) Epoch 26, batch 900, train_loss[loss=3.538, NarTop10Accuracy=0.6109, over 6274.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6428, over 5809.14 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (6/8) Epoch 26, batch 1000, train_loss[loss=3.215, NarTop10Accuracy=0.6764, over 6115.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6427, over 5906.89 frames. 
], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:57,798 INFO [trainer.py:765] (6/8) Epoch 26, batch 1100, train_loss[loss=3.212, NarTop10Accuracy=0.6723, over 6921.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6425, over 5946.20 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,894 INFO [trainer.py:765] (6/8) Epoch 26, batch 1200, train_loss[loss=3.304, NarTop10Accuracy=0.6645, over 7110.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6427, over 5947.54 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,658 INFO [trainer.py:765] (6/8) Epoch 26, batch 1300, train_loss[loss=3.494, NarTop10Accuracy=0.6137, over 4935.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.644, over 6009.43 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (6/8) Epoch 26, batch 1400, train_loss[loss=3.098, NarTop10Accuracy=0.6943, over 6236.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6403, over 6043.71 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,594 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (6/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:37:14,078 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (6/8) Epoch 26, batch 1500, train_loss[loss=3.644, NarTop10Accuracy=0.6004, over 6140.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6408, over 5990.28 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,061 INFO [trainer.py:765] (6/8) Epoch 26, batch 1600, train_loss[loss=3.188, NarTop10Accuracy=0.6698, over 7101.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.641, over 5957.27 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (6/8) Epoch 26, batch 1700, train_loss[loss=3.318, NarTop10Accuracy=0.6392, over 6626.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6379, over 5950.41 frames. ], batch size: 14, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (6/8) Epoch 26, batch 1800, train_loss[loss=3.429, NarTop10Accuracy=0.6348, over 7145.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6369, over 6011.63 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (6/8) Epoch 26, batch 1900, train_loss[loss=3.565, NarTop10Accuracy=0.5977, over 6463.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6343, over 6069.44 frames. ], batch size: 50, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (6/8) Epoch 26, batch 2000, train_loss[loss=3.429, NarTop10Accuracy=0.6372, over 6007.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6346, over 6052.85 frames. ], batch size: 49, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (6/8) Epoch 26, batch 2100, train_loss[loss=3.427, NarTop10Accuracy=0.6451, over 4678.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6372, over 6025.68 frames. ], batch size: 5, lr: 3.26e-03 +2024-08-06 12:40:27,759 INFO [trainer.py:765] (6/8) Epoch 26, batch 2200, train_loss[loss=3.376, NarTop10Accuracy=0.6452, over 7168.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6358, over 6054.91 frames. 
], batch size: 31, lr: 3.26e-03 +2024-08-06 12:40:53,233 INFO [trainer.py:765] (6/8) Epoch 26, batch 2300, train_loss[loss=3.025, NarTop10Accuracy=0.7021, over 5932.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6349, over 6078.45 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,931 INFO [trainer.py:765] (6/8) Epoch 26, batch 2400, train_loss[loss=3.315, NarTop10Accuracy=0.6424, over 5347.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6352, over 5871.48 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,478 INFO [trainer.py:765] (6/8) Epoch 26, batch 2500, train_loss[loss=2.974, NarTop10Accuracy=0.7098, over 4982.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6414, over 5538.89 frames. ], batch size: 6, lr: 3.25e-03 +2024-08-06 12:42:05,828 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:43:12,533 INFO [trainer.py:765] (6/8) Epoch 27, batch 100, train_loss[loss=3.635, NarTop10Accuracy=0.5885, over 7281.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6482, over 2376.18 frames. ], batch size: 30, lr: 3.19e-03 +2024-08-06 12:43:43,576 INFO [trainer.py:765] (6/8) Epoch 27, batch 200, train_loss[loss=3.409, NarTop10Accuracy=0.6342, over 6924.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6529, over 3888.35 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,786 INFO [trainer.py:765] (6/8) Epoch 27, batch 300, train_loss[loss=3.291, NarTop10Accuracy=0.6642, over 7324.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6519, over 4684.03 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,460 INFO [trainer.py:765] (6/8) Epoch 27, batch 400, train_loss[loss=3.103, NarTop10Accuracy=0.7016, over 5134.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6528, over 5130.93 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,669 INFO [trainer.py:765] (6/8) Epoch 27, batch 500, train_loss[loss=3.216, NarTop10Accuracy=0.6703, over 6187.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.653, over 5404.70 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,260 INFO [trainer.py:765] (6/8) Epoch 27, batch 600, train_loss[loss=3.225, NarTop10Accuracy=0.665, over 5817.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6506, over 5671.99 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,747 INFO [trainer.py:765] (6/8) Epoch 27, batch 700, train_loss[loss=3.431, NarTop10Accuracy=0.6281, over 5079.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.649, over 5742.21 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,015 INFO [trainer.py:765] (6/8) Epoch 27, batch 800, train_loss[loss=3.132, NarTop10Accuracy=0.6927, over 5008.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.646, over 5790.81 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,741 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (6/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:47:43,336 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,260 INFO [trainer.py:765] (6/8) Epoch 27, batch 900, train_loss[loss=3.436, NarTop10Accuracy=0.6296, over 6182.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6457, over 5812.21 frames. 
], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,863 INFO [trainer.py:765] (6/8) Epoch 27, batch 1000, train_loss[loss=3.299, NarTop10Accuracy=0.6497, over 6295.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6445, over 5916.02 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,085 INFO [trainer.py:765] (6/8) Epoch 27, batch 1100, train_loss[loss=3.508, NarTop10Accuracy=0.6175, over 6687.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6426, over 5944.78 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (6/8) Epoch 27, batch 1200, train_loss[loss=3.35, NarTop10Accuracy=0.6553, over 7371.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6427, over 5942.11 frames. ], batch size: 30, lr: 3.16e-03 +2024-08-06 12:50:06,242 INFO [trainer.py:765] (6/8) Epoch 27, batch 1300, train_loss[loss=3.209, NarTop10Accuracy=0.6848, over 5113.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6424, over 6011.65 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,950 INFO [trainer.py:765] (6/8) Epoch 27, batch 1400, train_loss[loss=3.316, NarTop10Accuracy=0.6573, over 6181.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 6043.81 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,279 INFO [trainer.py:765] (6/8) Epoch 27, batch 1500, train_loss[loss=3.475, NarTop10Accuracy=0.6255, over 5791.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6413, over 5967.37 frames. ], batch size: 48, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (6/8) Epoch 27, batch 1600, train_loss[loss=3.413, NarTop10Accuracy=0.6326, over 7252.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6414, over 5957.43 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:52:06,062 INFO [trainer.py:765] (6/8) Epoch 27, batch 1700, train_loss[loss=3.614, NarTop10Accuracy=0.5961, over 6252.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6404, over 5931.45 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,669 INFO [trainer.py:765] (6/8) Epoch 27, batch 1800, train_loss[loss=3.498, NarTop10Accuracy=0.6245, over 7257.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6421, over 5998.36 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,289 INFO [trainer.py:765] (6/8) Epoch 27, batch 1900, train_loss[loss=3.729, NarTop10Accuracy=0.5734, over 6164.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6377, over 6039.24 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:27,999 INFO [trainer.py:765] (6/8) Epoch 27, batch 2000, train_loss[loss=3.319, NarTop10Accuracy=0.6572, over 5891.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6391, over 6019.85 frames. ], batch size: 50, lr: 3.14e-03 +2024-08-06 12:53:53,539 INFO [trainer.py:765] (6/8) Epoch 27, batch 2100, train_loss[loss=3.827, NarTop10Accuracy=0.5599, over 3956.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.637, over 6011.27 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (6/8) Epoch 27, batch 2200, train_loss[loss=3.248, NarTop10Accuracy=0.6692, over 7089.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.637, over 6050.12 frames. ], batch size: 30, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (6/8) Epoch 27, batch 2300, train_loss[loss=3.223, NarTop10Accuracy=0.6728, over 5848.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6347, over 6064.20 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (6/8) Epoch 27, batch 2400, train_loss[loss=3.593, NarTop10Accuracy=0.5888, over 5225.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6321, over 5877.08 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,727 INFO [trainer.py:765] (6/8) Epoch 27, batch 2500, train_loss[loss=3.222, NarTop10Accuracy=0.6612, over 5161.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6379, over 5546.99 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,577 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (6/8) Epoch 28, batch 100, train_loss[loss=3.302, NarTop10Accuracy=0.6555, over 7119.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6524, over 2371.15 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,205 INFO [trainer.py:765] (6/8) Epoch 28, batch 200, train_loss[loss=3.233, NarTop10Accuracy=0.6692, over 6838.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.652, over 3868.27 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,705 INFO [trainer.py:765] (6/8) Epoch 28, batch 300, train_loss[loss=3.414, NarTop10Accuracy=0.6362, over 7258.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.65, over 4669.15 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,457 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (6/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 12:58:07,334 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,933 INFO [trainer.py:765] (6/8) Epoch 28, batch 400, train_loss[loss=3.193, NarTop10Accuracy=0.6797, over 5048.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.648, over 5124.01 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,438 INFO [trainer.py:765] (6/8) Epoch 28, batch 500, train_loss[loss=3.125, NarTop10Accuracy=0.7006, over 6149.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6508, over 5416.50 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (6/8) Epoch 28, batch 600, train_loss[loss=3.131, NarTop10Accuracy=0.6867, over 5877.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6497, over 5679.82 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (6/8) Epoch 28, batch 700, train_loss[loss=3.138, NarTop10Accuracy=0.6909, over 4260.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6502, over 5743.97 frames. ], batch size: 5, lr: 3.06e-03 +2024-08-06 13:00:56,434 INFO [trainer.py:765] (6/8) Epoch 28, batch 800, train_loss[loss=3.066, NarTop10Accuracy=0.7186, over 5238.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6492, over 5790.23 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,043 INFO [trainer.py:765] (6/8) Epoch 28, batch 900, train_loss[loss=3.244, NarTop10Accuracy=0.6716, over 6301.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6463, over 5807.81 frames. ], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:06,495 INFO [trainer.py:765] (6/8) Epoch 28, batch 1000, train_loss[loss=3.456, NarTop10Accuracy=0.6244, over 6274.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6423, over 5901.98 frames. 
], batch size: 13, lr: 3.05e-03 +2024-08-06 13:02:41,230 INFO [trainer.py:765] (6/8) Epoch 28, batch 1100, train_loss[loss=3.091, NarTop10Accuracy=0.6928, over 6764.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.641, over 5941.30 frames. ], batch size: 17, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (6/8) Epoch 28, batch 1200, train_loss[loss=3.385, NarTop10Accuracy=0.6372, over 7455.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6405, over 5933.04 frames. ], batch size: 31, lr: 3.05e-03 +2024-08-06 13:03:54,154 INFO [trainer.py:765] (6/8) Epoch 28, batch 1300, train_loss[loss=3.12, NarTop10Accuracy=0.6946, over 5067.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6423, over 5992.83 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (6/8) Epoch 28, batch 1400, train_loss[loss=3.279, NarTop10Accuracy=0.6631, over 5986.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6412, over 6022.83 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,349 INFO [trainer.py:765] (6/8) Epoch 28, batch 1500, train_loss[loss=3.479, NarTop10Accuracy=0.6191, over 6541.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5975.90 frames. ], batch size: 48, lr: 3.04e-03 +2024-08-06 13:05:30,371 INFO [trainer.py:765] (6/8) Epoch 28, batch 1600, train_loss[loss=3.662, NarTop10Accuracy=0.5834, over 7143.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6402, over 5968.26 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (6/8) Epoch 28, batch 1700, train_loss[loss=3.56, NarTop10Accuracy=0.5995, over 6166.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6407, over 5946.31 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (6/8) Epoch 28, batch 1800, train_loss[loss=3.599, NarTop10Accuracy=0.5907, over 7066.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6424, over 6000.15 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,373 INFO [trainer.py:765] (6/8) Epoch 28, batch 1900, train_loss[loss=3.558, NarTop10Accuracy=0.6076, over 6451.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 6039.97 frames. ], batch size: 51, lr: 3.03e-03 +2024-08-06 13:07:16,116 INFO [trainer.py:765] (6/8) Epoch 28, batch 2000, train_loss[loss=3.422, NarTop10Accuracy=0.6309, over 5876.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6418, over 6026.69 frames. ], batch size: 49, lr: 3.03e-03 +2024-08-06 13:07:41,547 INFO [trainer.py:765] (6/8) Epoch 28, batch 2100, train_loss[loss=3.494, NarTop10Accuracy=0.6078, over 3965.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6389, over 6015.09 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (6/8) Epoch 28, batch 2200, train_loss[loss=3.54, NarTop10Accuracy=0.6127, over 7091.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6391, over 6043.85 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,387 INFO [trainer.py:765] (6/8) Epoch 28, batch 2300, train_loss[loss=3.477, NarTop10Accuracy=0.6158, over 5666.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6361, over 6083.79 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,135 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (6/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
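The optim.py:386 entries above summarise recent gradient norms as five percentiles (min, 25%, 50%, 75%, max) plus a clipping threshold; in every such entry the reported threshold tracks Clipping_scale (2.0) times the median grad-norm (e.g. 2.0 × 2.106e+02 ≈ 4.211e+02). The sketch below reproduces that bookkeeping from a window of per-step norms; it is an illustrative reconstruction, not the ScaledAdam code in optim.py, and reading percent-clipped as the share of steps whose norm exceeded the threshold is an assumption.

```python
import numpy as np

def grad_norm_summary(recent_norms, clipping_scale=2.0):
    """Summarise a window of gradient norms like the log lines above.

    recent_norms: hypothetical list of total grad-norms from recent steps.
    Returns the five reported percentiles, the derived clipping threshold,
    and the (assumed) percent of steps that would have been clipped.
    """
    norms = np.asarray(recent_norms, dtype=np.float64)
    quartiles = np.percentile(norms, [0, 25, 50, 75, 100])
    threshold = clipping_scale * quartiles[2]   # clipping_scale * median
    percent_clipped = 100.0 * float(np.mean(norms > threshold))
    return quartiles, threshold, percent_clipped

# Example with made-up norms in the same range as the logged quartiles:
# grad_norm_summary([160.0, 195.0, 210.0, 230.0, 480.0])
```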
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 13:08:43,891 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,390 INFO [trainer.py:765] (6/8) Epoch 28, batch 2400, train_loss[loss=3.462, NarTop10Accuracy=0.6195, over 5131.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6337, over 5899.75 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (6/8) Epoch 28, batch 2500, train_loss[loss=3.259, NarTop10Accuracy=0.6431, over 5026.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6398, over 5543.13 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:51,858 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:10:48,192 INFO [trainer.py:765] (6/8) Epoch 29, batch 100, train_loss[loss=3.413, NarTop10Accuracy=0.6394, over 7131.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.656, over 2381.49 frames. ], batch size: 30, lr: 2.96e-03 +2024-08-06 13:11:20,840 INFO [trainer.py:765] (6/8) Epoch 29, batch 200, train_loss[loss=3.238, NarTop10Accuracy=0.6703, over 6887.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6537, over 3867.05 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,950 INFO [trainer.py:765] (6/8) Epoch 29, batch 300, train_loss[loss=3.149, NarTop10Accuracy=0.6881, over 7105.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6529, over 4676.24 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (6/8) Epoch 29, batch 400, train_loss[loss=3.162, NarTop10Accuracy=0.6873, over 5126.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6525, over 5125.42 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,921 INFO [trainer.py:765] (6/8) Epoch 29, batch 500, train_loss[loss=3.359, NarTop10Accuracy=0.6438, over 6136.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6507, over 5398.93 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,547 INFO [trainer.py:765] (6/8) Epoch 29, batch 600, train_loss[loss=3.6, NarTop10Accuracy=0.5997, over 5727.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6487, over 5668.52 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,937 INFO [trainer.py:765] (6/8) Epoch 29, batch 700, train_loss[loss=3.436, NarTop10Accuracy=0.6382, over 5081.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6479, over 5755.52 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,677 INFO [trainer.py:765] (6/8) Epoch 29, batch 800, train_loss[loss=3.635, NarTop10Accuracy=0.5882, over 5069.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6422, over 5802.24 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,114 INFO [trainer.py:765] (6/8) Epoch 29, batch 900, train_loss[loss=3.287, NarTop10Accuracy=0.6612, over 6806.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6436, over 5834.04 frames. ], batch size: 14, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (6/8) Epoch 29, batch 1000, train_loss[loss=3.369, NarTop10Accuracy=0.6409, over 6701.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6436, over 5926.19 frames. ], batch size: 14, lr: 2.94e-03 +2024-08-06 13:16:31,713 INFO [trainer.py:765] (6/8) Epoch 29, batch 1100, train_loss[loss=3.563, NarTop10Accuracy=0.6039, over 6932.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6422, over 5963.21 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,934 INFO [trainer.py:765] (6/8) Epoch 29, batch 1200, train_loss[loss=3.391, NarTop10Accuracy=0.6316, over 7301.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.642, over 5962.80 frames. ], batch size: 31, lr: 2.94e-03 +2024-08-06 13:17:43,957 INFO [trainer.py:765] (6/8) Epoch 29, batch 1300, train_loss[loss=3.469, NarTop10Accuracy=0.6201, over 5064.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6433, over 6031.58 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,924 INFO [trainer.py:765] (6/8) Epoch 29, batch 1400, train_loss[loss=3.646, NarTop10Accuracy=0.579, over 6149.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6409, over 6045.16 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (6/8) Epoch 29, batch 1500, train_loss[loss=3.898, NarTop10Accuracy=0.534, over 6336.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6432, over 5972.84 frames. ], batch size: 51, lr: 2.93e-03 +2024-08-06 13:19:16,409 INFO [trainer.py:765] (6/8) Epoch 29, batch 1600, train_loss[loss=3.227, NarTop10Accuracy=0.6807, over 6893.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6413, over 5960.06 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:19:43,242 INFO [trainer.py:765] (6/8) Epoch 29, batch 1700, train_loss[loss=3.219, NarTop10Accuracy=0.6751, over 6211.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6425, over 5935.54 frames. ], batch size: 13, lr: 2.93e-03 +2024-08-06 13:19:49,091 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (6/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,108 INFO [trainer.py:765] (6/8) Epoch 29, batch 1800, train_loss[loss=3.292, NarTop10Accuracy=0.663, over 7310.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6438, over 6009.98 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,845 INFO [trainer.py:765] (6/8) Epoch 29, batch 1900, train_loss[loss=3.597, NarTop10Accuracy=0.6009, over 5989.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.64, over 6050.56 frames. ], batch size: 48, lr: 2.93e-03 +2024-08-06 13:21:12,478 INFO [trainer.py:765] (6/8) Epoch 29, batch 2000, train_loss[loss=3.7, NarTop10Accuracy=0.5668, over 5882.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6395, over 6021.18 frames. ], batch size: 49, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (6/8) Epoch 29, batch 2100, train_loss[loss=3.6, NarTop10Accuracy=0.6046, over 4825.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6387, over 6013.47 frames. ], batch size: 5, lr: 2.92e-03 +2024-08-06 13:22:03,360 INFO [trainer.py:765] (6/8) Epoch 29, batch 2200, train_loss[loss=3.423, NarTop10Accuracy=0.632, over 7395.00 frames. ], tot_loss[loss=3.398, NarTop10Accuracy=0.6389, over 6050.91 frames. ], batch size: 30, lr: 2.92e-03 +2024-08-06 13:22:28,831 INFO [trainer.py:765] (6/8) Epoch 29, batch 2300, train_loss[loss=3.432, NarTop10Accuracy=0.6228, over 5784.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6365, over 6063.34 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,621 INFO [trainer.py:765] (6/8) Epoch 29, batch 2400, train_loss[loss=3.24, NarTop10Accuracy=0.6701, over 5216.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6366, over 5880.81 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 13:23:16,979 INFO [trainer.py:765] (6/8) Epoch 29, batch 2500, train_loss[loss=3.401, NarTop10Accuracy=0.6295, over 4941.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6423, over 5529.26 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,152 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (6/8) Epoch 30, batch 100, train_loss[loss=3.304, NarTop10Accuracy=0.6624, over 7074.00 frames. ], tot_loss[loss=3.272, NarTop10Accuracy=0.6661, over 2368.72 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (6/8) Epoch 30, batch 200, train_loss[loss=3.15, NarTop10Accuracy=0.6795, over 6842.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6595, over 3873.02 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (6/8) Epoch 30, batch 300, train_loss[loss=3.238, NarTop10Accuracy=0.6696, over 7309.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6549, over 4654.14 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,538 INFO [trainer.py:765] (6/8) Epoch 30, batch 400, train_loss[loss=3.275, NarTop10Accuracy=0.6619, over 5180.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6525, over 5111.93 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,919 INFO [trainer.py:765] (6/8) Epoch 30, batch 500, train_loss[loss=3.211, NarTop10Accuracy=0.6719, over 6252.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6548, over 5395.23 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,422 INFO [trainer.py:765] (6/8) Epoch 30, batch 600, train_loss[loss=3.29, NarTop10Accuracy=0.6542, over 5814.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6543, over 5681.06 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (6/8) Epoch 30, batch 700, train_loss[loss=3.521, NarTop10Accuracy=0.6209, over 5022.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6529, over 5745.57 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,476 INFO [trainer.py:765] (6/8) Epoch 30, batch 800, train_loss[loss=3.392, NarTop10Accuracy=0.6243, over 5074.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.65, over 5789.51 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,424 INFO [trainer.py:765] (6/8) Epoch 30, batch 900, train_loss[loss=3.507, NarTop10Accuracy=0.6189, over 6186.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6484, over 5817.66 frames. ], batch size: 13, lr: 2.85e-03 +2024-08-06 13:29:45,913 INFO [trainer.py:765] (6/8) Epoch 30, batch 1000, train_loss[loss=3.435, NarTop10Accuracy=0.6402, over 6744.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6455, over 5914.66 frames. ], batch size: 14, lr: 2.84e-03 +2024-08-06 13:30:24,171 INFO [trainer.py:765] (6/8) Epoch 30, batch 1100, train_loss[loss=3.365, NarTop10Accuracy=0.6493, over 6743.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6444, over 5962.59 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,000 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (6/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
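In each trainer.py:765 entry, train_loss[...] describes only the current batch, while tot_loss[...] is a frame-weighted running average over recent batches, which is why it moves far more smoothly. Below is a minimal sketch of that kind of bookkeeping, assuming a fixed window of recent batches; the trainer's own accumulator may instead use a decayed sum (the fractional frame counts such as 5962.59 hint at that), so treat this as illustrative rather than the code behind these numbers.

```python
from collections import deque

class RunningFrameAverage:
    """Frame-weighted running average of a per-batch loss (illustrative)."""

    def __init__(self, window=200):
        # Each element is (loss * frames, frames) for one recent batch.
        self.buf = deque(maxlen=window)

    def update(self, batch_loss, num_frames):
        self.buf.append((batch_loss * num_frames, num_frames))

    @property
    def value(self):
        weighted = sum(w for w, _ in self.buf)
        frames = sum(f for _, f in self.buf)
        return weighted / max(frames, 1.0)

# Feeding in two train_loss entries from above (Epoch 30, batches 100 and 200):
# avg = RunningFrameAverage()
# avg.update(3.304, 7074.0)
# avg.update(3.15, 6842.0)
# avg.value  # frame-weighted mean of the two batch losses
```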
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (6/8) Epoch 30, batch 1200, train_loss[loss=3.237, NarTop10Accuracy=0.6759, over 7159.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 5953.77 frames. ], batch size: 30, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (6/8) Epoch 30, batch 1300, train_loss[loss=3.205, NarTop10Accuracy=0.6708, over 5092.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6442, over 6008.32 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (6/8) Epoch 30, batch 1400, train_loss[loss=3.633, NarTop10Accuracy=0.5931, over 6049.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.644, over 6040.87 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (6/8) Epoch 30, batch 1500, train_loss[loss=3.552, NarTop10Accuracy=0.6087, over 6257.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6427, over 5975.82 frames. ], batch size: 50, lr: 2.83e-03 +2024-08-06 13:33:20,407 INFO [trainer.py:765] (6/8) Epoch 30, batch 1600, train_loss[loss=3.489, NarTop10Accuracy=0.6152, over 6908.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.641, over 5945.90 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (6/8) Epoch 30, batch 1700, train_loss[loss=3.606, NarTop10Accuracy=0.5939, over 6241.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 5927.85 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (6/8) Epoch 30, batch 1800, train_loss[loss=3.601, NarTop10Accuracy=0.5965, over 7204.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6424, over 6015.87 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (6/8) Epoch 30, batch 1900, train_loss[loss=3.607, NarTop10Accuracy=0.6043, over 5924.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6394, over 6056.50 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (6/8) Epoch 30, batch 2000, train_loss[loss=3.745, NarTop10Accuracy=0.5631, over 6240.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6418, over 6031.35 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (6/8) Epoch 30, batch 2100, train_loss[loss=3.375, NarTop10Accuracy=0.6469, over 4103.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 6003.39 frames. ], batch size: 4, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (6/8) Epoch 30, batch 2200, train_loss[loss=3.432, NarTop10Accuracy=0.6349, over 7108.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6413, over 6030.89 frames. ], batch size: 30, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (6/8) Epoch 30, batch 2300, train_loss[loss=3.781, NarTop10Accuracy=0.5669, over 5821.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6408, over 6064.95 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (6/8) Epoch 30, batch 2400, train_loss[loss=3.188, NarTop10Accuracy=0.6825, over 5225.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6379, over 5890.14 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (6/8) Epoch 30, batch 2500, train_loss[loss=3.247, NarTop10Accuracy=0.673, over 5133.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6433, over 5550.12 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:36,189 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (6/8) Epoch 31, batch 100, train_loss[loss=3.291, NarTop10Accuracy=0.67, over 7138.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6575, over 2373.79 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,652 INFO [trainer.py:765] (6/8) Epoch 31, batch 200, train_loss[loss=3.199, NarTop10Accuracy=0.676, over 6859.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6553, over 3871.00 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (6/8) Epoch 31, batch 300, train_loss[loss=3.16, NarTop10Accuracy=0.6795, over 7221.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6566, over 4671.54 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (6/8) Epoch 31, batch 400, train_loss[loss=3.603, NarTop10Accuracy=0.6054, over 4991.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6529, over 5111.88 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,814 INFO [trainer.py:765] (6/8) Epoch 31, batch 500, train_loss[loss=3.182, NarTop10Accuracy=0.6748, over 6245.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6526, over 5394.97 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,299 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (6/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,862 INFO [trainer.py:765] (6/8) Epoch 31, batch 600, train_loss[loss=3.067, NarTop10Accuracy=0.7101, over 5746.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6518, over 5668.95 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,260 INFO [trainer.py:765] (6/8) Epoch 31, batch 700, train_loss[loss=3.085, NarTop10Accuracy=0.7053, over 4366.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 5741.32 frames. ], batch size: 5, lr: 2.76e-03 +2024-08-06 13:42:32,158 INFO [trainer.py:765] (6/8) Epoch 31, batch 800, train_loss[loss=3.176, NarTop10Accuracy=0.689, over 5221.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6505, over 5801.80 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (6/8) Epoch 31, batch 900, train_loss[loss=3.427, NarTop10Accuracy=0.6407, over 6299.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6518, over 5799.85 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (6/8) Epoch 31, batch 1000, train_loss[loss=3.304, NarTop10Accuracy=0.6645, over 6211.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6496, over 5917.71 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,513 INFO [trainer.py:765] (6/8) Epoch 31, batch 1100, train_loss[loss=3.444, NarTop10Accuracy=0.6321, over 6759.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 5949.88 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,786 INFO [trainer.py:765] (6/8) Epoch 31, batch 1200, train_loss[loss=3.312, NarTop10Accuracy=0.6477, over 7459.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6458, over 5946.91 frames. ], batch size: 31, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (6/8) Epoch 31, batch 1300, train_loss[loss=3.448, NarTop10Accuracy=0.631, over 5074.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6467, over 6014.96 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,741 INFO [trainer.py:765] (6/8) Epoch 31, batch 1400, train_loss[loss=3.25, NarTop10Accuracy=0.6712, over 6006.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6451, over 6032.27 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (6/8) Epoch 31, batch 1500, train_loss[loss=3.558, NarTop10Accuracy=0.6092, over 6111.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.646, over 5964.27 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:47:04,658 INFO [trainer.py:765] (6/8) Epoch 31, batch 1600, train_loss[loss=3.171, NarTop10Accuracy=0.6915, over 7330.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6457, over 5944.94 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:47:31,424 INFO [trainer.py:765] (6/8) Epoch 31, batch 1700, train_loss[loss=3.632, NarTop10Accuracy=0.5823, over 6229.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6439, over 5939.56 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,017 INFO [trainer.py:765] (6/8) Epoch 31, batch 1800, train_loss[loss=3.699, NarTop10Accuracy=0.5871, over 7226.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6435, over 6006.39 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,577 INFO [trainer.py:765] (6/8) Epoch 31, batch 1900, train_loss[loss=3.509, NarTop10Accuracy=0.6173, over 6141.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6416, over 6034.08 frames. ], batch size: 48, lr: 2.74e-03 +2024-08-06 13:48:50,258 INFO [trainer.py:765] (6/8) Epoch 31, batch 2000, train_loss[loss=3.7, NarTop10Accuracy=0.5852, over 6557.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6426, over 6012.09 frames. ], batch size: 50, lr: 2.73e-03 +2024-08-06 13:49:15,765 INFO [trainer.py:765] (6/8) Epoch 31, batch 2100, train_loss[loss=3.174, NarTop10Accuracy=0.6756, over 4688.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.643, over 5997.49 frames. ], batch size: 5, lr: 2.73e-03 +2024-08-06 13:49:41,279 INFO [trainer.py:765] (6/8) Epoch 31, batch 2200, train_loss[loss=3.365, NarTop10Accuracy=0.6354, over 7308.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6445, over 6040.62 frames. ], batch size: 30, lr: 2.73e-03 +2024-08-06 13:50:06,708 INFO [trainer.py:765] (6/8) Epoch 31, batch 2300, train_loss[loss=3.245, NarTop10Accuracy=0.6749, over 5612.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6404, over 6061.08 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,393 INFO [trainer.py:765] (6/8) Epoch 31, batch 2400, train_loss[loss=3.328, NarTop10Accuracy=0.6353, over 5151.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6375, over 5859.50 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,892 INFO [trainer.py:765] (6/8) Epoch 31, batch 2500, train_loss[loss=3.316, NarTop10Accuracy=0.6643, over 4919.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6423, over 5531.54 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,995 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 13:51:19,069 INFO [trainer.py:811] (6/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,007 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 13:52:19,910 INFO [trainer.py:765] (6/8) Epoch 32, batch 100, train_loss[loss=3.101, NarTop10Accuracy=0.6983, over 7024.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6576, over 2372.08 frames. ], batch size: 30, lr: 2.68e-03 +2024-08-06 13:52:52,537 INFO [trainer.py:765] (6/8) Epoch 32, batch 200, train_loss[loss=3.545, NarTop10Accuracy=0.6075, over 6914.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6537, over 3872.46 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (6/8) Epoch 32, batch 300, train_loss[loss=3.144, NarTop10Accuracy=0.6949, over 6989.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6556, over 4685.11 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,886 INFO [trainer.py:765] (6/8) Epoch 32, batch 400, train_loss[loss=3.553, NarTop10Accuracy=0.6089, over 5101.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6541, over 5142.81 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,821 INFO [trainer.py:765] (6/8) Epoch 32, batch 500, train_loss[loss=3.1, NarTop10Accuracy=0.6901, over 6199.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6548, over 5414.14 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,772 INFO [trainer.py:765] (6/8) Epoch 32, batch 600, train_loss[loss=3.279, NarTop10Accuracy=0.6649, over 5884.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.656, over 5678.10 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,511 INFO [trainer.py:765] (6/8) Epoch 32, batch 700, train_loss[loss=3.128, NarTop10Accuracy=0.7035, over 5006.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6523, over 5742.32 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,172 INFO [trainer.py:765] (6/8) Epoch 32, batch 800, train_loss[loss=2.871, NarTop10Accuracy=0.7296, over 4996.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6522, over 5809.32 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,165 INFO [trainer.py:765] (6/8) Epoch 32, batch 900, train_loss[loss=3.733, NarTop10Accuracy=0.5688, over 6318.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6517, over 5833.75 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,520 INFO [trainer.py:765] (6/8) Epoch 32, batch 1000, train_loss[loss=3.469, NarTop10Accuracy=0.6146, over 6624.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6499, over 5940.61 frames. ], batch size: 14, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (6/8) Epoch 32, batch 1100, train_loss[loss=3.148, NarTop10Accuracy=0.6899, over 6894.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6467, over 5968.04 frames. ], batch size: 17, lr: 2.66e-03 +2024-08-06 13:58:30,541 INFO [trainer.py:765] (6/8) Epoch 32, batch 1200, train_loss[loss=3.221, NarTop10Accuracy=0.6798, over 6895.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6463, over 5954.92 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,259 INFO [trainer.py:765] (6/8) Epoch 32, batch 1300, train_loss[loss=3.257, NarTop10Accuracy=0.6635, over 4931.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 6015.81 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,265 INFO [trainer.py:765] (6/8) Epoch 32, batch 1400, train_loss[loss=3.418, NarTop10Accuracy=0.6358, over 6148.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6445, over 6032.04 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,975 INFO [trainer.py:765] (6/8) Epoch 32, batch 1500, train_loss[loss=3.686, NarTop10Accuracy=0.5824, over 6131.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 5982.63 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,823 INFO [trainer.py:765] (6/8) Epoch 32, batch 1600, train_loss[loss=3.25, NarTop10Accuracy=0.6689, over 7122.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6451, over 5946.14 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,533 INFO [trainer.py:765] (6/8) Epoch 32, batch 1700, train_loss[loss=3.378, NarTop10Accuracy=0.6498, over 6547.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6448, over 5950.94 frames. ], batch size: 14, lr: 2.65e-03 +2024-08-06 14:01:34,088 INFO [trainer.py:765] (6/8) Epoch 32, batch 1800, train_loss[loss=3.371, NarTop10Accuracy=0.6453, over 7217.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6428, over 6007.67 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,635 INFO [trainer.py:765] (6/8) Epoch 32, batch 1900, train_loss[loss=3.405, NarTop10Accuracy=0.6338, over 5888.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6424, over 6035.39 frames. ], batch size: 48, lr: 2.65e-03 +2024-08-06 14:02:20,590 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (6/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 29874MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,384 INFO [trainer.py:765] (6/8) Epoch 32, batch 2000, train_loss[loss=3.524, NarTop10Accuracy=0.6144, over 6077.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6449, over 6024.58 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:03:01,698 INFO [trainer.py:765] (6/8) Epoch 32, batch 2100, train_loss[loss=3.01, NarTop10Accuracy=0.7048, over 3962.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6438, over 5993.24 frames. ], batch size: 4, lr: 2.65e-03 +2024-08-06 14:03:27,177 INFO [trainer.py:765] (6/8) Epoch 32, batch 2200, train_loss[loss=3.587, NarTop10Accuracy=0.5955, over 6872.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6435, over 6023.35 frames. ], batch size: 30, lr: 2.64e-03 +2024-08-06 14:03:52,587 INFO [trainer.py:765] (6/8) Epoch 32, batch 2300, train_loss[loss=3.74, NarTop10Accuracy=0.5698, over 5800.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6414, over 6065.09 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,275 INFO [trainer.py:765] (6/8) Epoch 32, batch 2400, train_loss[loss=3.277, NarTop10Accuracy=0.6552, over 5102.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6407, over 5877.51 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,636 INFO [trainer.py:765] (6/8) Epoch 32, batch 2500, train_loss[loss=3.391, NarTop10Accuracy=0.6556, over 4899.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 5534.77 frames. ], batch size: 6, lr: 2.64e-03 +2024-08-06 14:05:02,276 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (6/8) Epoch 33, batch 100, train_loss[loss=3.47, NarTop10Accuracy=0.6194, over 7144.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6535, over 2360.04 frames. ], batch size: 30, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (6/8) Epoch 33, batch 200, train_loss[loss=3.406, NarTop10Accuracy=0.6522, over 6867.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6568, over 3840.76 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,147 INFO [trainer.py:765] (6/8) Epoch 33, batch 300, train_loss[loss=3.191, NarTop10Accuracy=0.6852, over 7181.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6574, over 4664.56 frames. ], batch size: 22, lr: 2.59e-03 +2024-08-06 14:07:48,256 INFO [trainer.py:765] (6/8) Epoch 33, batch 400, train_loss[loss=3.438, NarTop10Accuracy=0.6338, over 5070.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6557, over 5143.54 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (6/8) Epoch 33, batch 500, train_loss[loss=3.259, NarTop10Accuracy=0.6489, over 6089.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6535, over 5421.54 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (6/8) Epoch 33, batch 600, train_loss[loss=3.301, NarTop10Accuracy=0.6689, over 5852.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6519, over 5674.98 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,925 INFO [trainer.py:765] (6/8) Epoch 33, batch 700, train_loss[loss=3.003, NarTop10Accuracy=0.7141, over 5089.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6511, over 5742.18 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,596 INFO [trainer.py:765] (6/8) Epoch 33, batch 800, train_loss[loss=2.946, NarTop10Accuracy=0.71, over 4373.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6479, over 5801.73 frames. ], batch size: 5, lr: 2.58e-03 +2024-08-06 14:10:35,387 INFO [trainer.py:765] (6/8) Epoch 33, batch 900, train_loss[loss=3.235, NarTop10Accuracy=0.6805, over 6171.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6496, over 5823.56 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:15,069 INFO [trainer.py:765] (6/8) Epoch 33, batch 1000, train_loss[loss=3.268, NarTop10Accuracy=0.665, over 6204.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.65, over 5906.35 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,302 INFO [trainer.py:765] (6/8) Epoch 33, batch 1100, train_loss[loss=3.597, NarTop10Accuracy=0.5874, over 6810.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6495, over 5937.37 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (6/8) Epoch 33, batch 1200, train_loss[loss=3.539, NarTop10Accuracy=0.6221, over 7291.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6483, over 5938.95 frames. ], batch size: 31, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (6/8) Epoch 33, batch 1300, train_loss[loss=3.654, NarTop10Accuracy=0.5898, over 4990.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6498, over 6022.86 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (6/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,802 INFO [trainer.py:765] (6/8) Epoch 33, batch 1400, train_loss[loss=3.232, NarTop10Accuracy=0.6547, over 6166.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6513, over 6031.47 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,245 INFO [trainer.py:765] (6/8) Epoch 33, batch 1500, train_loss[loss=3.551, NarTop10Accuracy=0.6153, over 5742.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6483, over 5972.32 frames. ], batch size: 48, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (6/8) Epoch 33, batch 1600, train_loss[loss=3.327, NarTop10Accuracy=0.6542, over 7207.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6466, over 5963.84 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,857 INFO [trainer.py:765] (6/8) Epoch 33, batch 1700, train_loss[loss=3.481, NarTop10Accuracy=0.6276, over 6272.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6455, over 5952.16 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,589 INFO [trainer.py:765] (6/8) Epoch 33, batch 1800, train_loss[loss=3.307, NarTop10Accuracy=0.6542, over 7256.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6477, over 6021.96 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,214 INFO [trainer.py:765] (6/8) Epoch 33, batch 1900, train_loss[loss=3.406, NarTop10Accuracy=0.6346, over 6153.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.645, over 6064.18 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:24,894 INFO [trainer.py:765] (6/8) Epoch 33, batch 2000, train_loss[loss=3.524, NarTop10Accuracy=0.6199, over 6904.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6453, over 6050.62 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:50,349 INFO [trainer.py:765] (6/8) Epoch 33, batch 2100, train_loss[loss=3.12, NarTop10Accuracy=0.6784, over 3860.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6448, over 6025.47 frames. ], batch size: 4, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (6/8) Epoch 33, batch 2200, train_loss[loss=3.396, NarTop10Accuracy=0.6387, over 7338.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6446, over 6061.56 frames. ], batch size: 31, lr: 2.56e-03 +2024-08-06 14:17:41,308 INFO [trainer.py:765] (6/8) Epoch 33, batch 2300, train_loss[loss=3.281, NarTop10Accuracy=0.6664, over 5634.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.642, over 6079.89 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (6/8) Epoch 33, batch 2400, train_loss[loss=3.578, NarTop10Accuracy=0.6099, over 5092.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6406, over 5897.96 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,706 INFO [trainer.py:765] (6/8) Epoch 33, batch 2500, train_loss[loss=3.515, NarTop10Accuracy=0.6181, over 5088.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6476, over 5543.70 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,690 INFO [trainer.py:650] (6/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (6/8) Epoch 34, batch 100, train_loss[loss=3.194, NarTop10Accuracy=0.692, over 7157.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6584, over 2360.99 frames. ], batch size: 30, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (6/8) Epoch 34, batch 200, train_loss[loss=3.377, NarTop10Accuracy=0.6293, over 6851.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6602, over 3863.33 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,842 INFO [trainer.py:765] (6/8) Epoch 34, batch 300, train_loss[loss=3.187, NarTop10Accuracy=0.6886, over 7076.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6573, over 4669.37 frames. ], batch size: 22, lr: 2.51e-03 +2024-08-06 14:21:31,449 INFO [trainer.py:765] (6/8) Epoch 34, batch 400, train_loss[loss=3.321, NarTop10Accuracy=0.6615, over 5076.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6563, over 5117.93 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (6/8) Epoch 34, batch 500, train_loss[loss=3.296, NarTop10Accuracy=0.6718, over 6111.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6551, over 5401.35 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,826 INFO [trainer.py:765] (6/8) Epoch 34, batch 600, train_loss[loss=3.438, NarTop10Accuracy=0.6216, over 5788.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.657, over 5672.24 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (6/8) Epoch 34, batch 700, train_loss[loss=3.133, NarTop10Accuracy=0.6768, over 5214.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6539, over 5742.66 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (6/8) Epoch 34, batch 800, train_loss[loss=3.262, NarTop10Accuracy=0.6692, over 5133.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6523, over 5797.38 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,718 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (6/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,899 INFO [trainer.py:765] (6/8) Epoch 34, batch 900, train_loss[loss=3.352, NarTop10Accuracy=0.651, over 6663.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6514, over 5815.07 frames. ], batch size: 14, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (6/8) Epoch 34, batch 1000, train_loss[loss=3.169, NarTop10Accuracy=0.6771, over 6164.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6512, over 5917.91 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (6/8) Epoch 34, batch 1100, train_loss[loss=3.391, NarTop10Accuracy=0.641, over 6807.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6507, over 5963.37 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (6/8) Epoch 34, batch 1200, train_loss[loss=3.425, NarTop10Accuracy=0.6407, over 7477.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6519, over 5960.88 frames. 
], batch size: 32, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (6/8) Epoch 34, batch 1300, train_loss[loss=3.593, NarTop10Accuracy=0.5973, over 4961.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 6033.27 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (6/8) Epoch 34, batch 1400, train_loss[loss=3.155, NarTop10Accuracy=0.6865, over 6158.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6489, over 6051.10 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (6/8) Epoch 34, batch 1500, train_loss[loss=3.852, NarTop10Accuracy=0.5487, over 5779.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6509, over 5980.86 frames. ], batch size: 51, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (6/8) Epoch 34, batch 1600, train_loss[loss=3.296, NarTop10Accuracy=0.6585, over 7014.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6479, over 5951.42 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,384 INFO [trainer.py:765] (6/8) Epoch 34, batch 1700, train_loss[loss=3.482, NarTop10Accuracy=0.6218, over 6319.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6463, over 5941.54 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,010 INFO [trainer.py:765] (6/8) Epoch 34, batch 1800, train_loss[loss=3.684, NarTop10Accuracy=0.5846, over 7193.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6485, over 5997.86 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,752 INFO [trainer.py:765] (6/8) Epoch 34, batch 1900, train_loss[loss=3.658, NarTop10Accuracy=0.5869, over 5930.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6438, over 6044.34 frames. ], batch size: 49, lr: 2.49e-03 +2024-08-06 14:30:09,516 INFO [trainer.py:765] (6/8) Epoch 34, batch 2000, train_loss[loss=3.663, NarTop10Accuracy=0.5882, over 5955.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6444, over 6019.32 frames. ], batch size: 49, lr: 2.49e-03 +2024-08-06 14:30:35,016 INFO [trainer.py:765] (6/8) Epoch 34, batch 2100, train_loss[loss=3.305, NarTop10Accuracy=0.6667, over 4938.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 6019.61 frames. ], batch size: 5, lr: 2.49e-03 +2024-08-06 14:31:00,511 INFO [trainer.py:765] (6/8) Epoch 34, batch 2200, train_loss[loss=3.406, NarTop10Accuracy=0.6403, over 7410.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 6049.55 frames. ], batch size: 31, lr: 2.49e-03 +2024-08-06 14:31:25,979 INFO [trainer.py:765] (6/8) Epoch 34, batch 2300, train_loss[loss=3.184, NarTop10Accuracy=0.6732, over 5877.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6437, over 6066.03 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (6/8) Epoch 34, batch 2400, train_loss[loss=3.577, NarTop10Accuracy=0.6109, over 5109.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.642, over 5881.50 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,249 INFO [trainer.py:765] (6/8) Epoch 34, batch 2500, train_loss[loss=3.298, NarTop10Accuracy=0.6516, over 5050.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6463, over 5546.26 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,107 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 14:33:26,337 INFO [trainer.py:765] (6/8) Epoch 35, batch 100, train_loss[loss=3.342, NarTop10Accuracy=0.6487, over 7419.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6588, over 2368.98 frames. 
], batch size: 30, lr: 2.44e-03 +2024-08-06 14:34:03,582 INFO [trainer.py:765] (6/8) Epoch 35, batch 200, train_loss[loss=3.305, NarTop10Accuracy=0.6586, over 6849.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6585, over 3869.88 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,186 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (6/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,664 INFO [trainer.py:765] (6/8) Epoch 35, batch 300, train_loss[loss=3.616, NarTop10Accuracy=0.5924, over 7062.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6597, over 4680.27 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,543 INFO [trainer.py:765] (6/8) Epoch 35, batch 400, train_loss[loss=3.424, NarTop10Accuracy=0.6394, over 5172.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6562, over 5135.02 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,187 INFO [trainer.py:765] (6/8) Epoch 35, batch 500, train_loss[loss=3.465, NarTop10Accuracy=0.6221, over 6087.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6576, over 5408.33 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,747 INFO [trainer.py:765] (6/8) Epoch 35, batch 600, train_loss[loss=3.261, NarTop10Accuracy=0.6648, over 5767.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.656, over 5676.90 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,827 INFO [trainer.py:765] (6/8) Epoch 35, batch 700, train_loss[loss=3.348, NarTop10Accuracy=0.6477, over 5148.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6552, over 5745.66 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,770 INFO [trainer.py:765] (6/8) Epoch 35, batch 800, train_loss[loss=3.094, NarTop10Accuracy=0.7031, over 5038.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6538, over 5818.84 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,304 INFO [trainer.py:765] (6/8) Epoch 35, batch 900, train_loss[loss=3.265, NarTop10Accuracy=0.6741, over 6570.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6518, over 5834.80 frames. ], batch size: 14, lr: 2.43e-03 +2024-08-06 14:38:43,709 INFO [trainer.py:765] (6/8) Epoch 35, batch 1000, train_loss[loss=3.505, NarTop10Accuracy=0.6195, over 6210.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.652, over 5928.99 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,567 INFO [trainer.py:765] (6/8) Epoch 35, batch 1100, train_loss[loss=3.606, NarTop10Accuracy=0.6016, over 6892.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6517, over 5945.60 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,838 INFO [trainer.py:765] (6/8) Epoch 35, batch 1200, train_loss[loss=3.143, NarTop10Accuracy=0.6807, over 7540.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.652, over 5943.87 frames. ], batch size: 31, lr: 2.43e-03 +2024-08-06 14:40:33,953 INFO [trainer.py:765] (6/8) Epoch 35, batch 1300, train_loss[loss=3.201, NarTop10Accuracy=0.6858, over 4931.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6522, over 6005.41 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,183 INFO [trainer.py:765] (6/8) Epoch 35, batch 1400, train_loss[loss=3.294, NarTop10Accuracy=0.6686, over 6223.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6483, over 6016.28 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,824 INFO [trainer.py:765] (6/8) Epoch 35, batch 1500, train_loss[loss=3.492, NarTop10Accuracy=0.6158, over 6322.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6493, over 5958.43 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,777 INFO [trainer.py:765] (6/8) Epoch 35, batch 1600, train_loss[loss=3.609, NarTop10Accuracy=0.5946, over 7114.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6482, over 5934.51 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,467 INFO [trainer.py:765] (6/8) Epoch 35, batch 1700, train_loss[loss=3.212, NarTop10Accuracy=0.6805, over 6619.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6488, over 5925.79 frames. ], batch size: 14, lr: 2.42e-03 +2024-08-06 14:42:55,040 INFO [trainer.py:765] (6/8) Epoch 35, batch 1800, train_loss[loss=3.255, NarTop10Accuracy=0.6688, over 6988.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6454, over 5980.59 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,646 INFO [trainer.py:765] (6/8) Epoch 35, batch 1900, train_loss[loss=3.417, NarTop10Accuracy=0.6375, over 6305.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6447, over 6038.44 frames. ], batch size: 48, lr: 2.42e-03 +2024-08-06 14:43:47,367 INFO [trainer.py:765] (6/8) Epoch 35, batch 2000, train_loss[loss=3.547, NarTop10Accuracy=0.6084, over 6219.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6446, over 6003.17 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:44:12,857 INFO [trainer.py:765] (6/8) Epoch 35, batch 2100, train_loss[loss=3.125, NarTop10Accuracy=0.7059, over 3991.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.645, over 5990.59 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,388 INFO [trainer.py:765] (6/8) Epoch 35, batch 2200, train_loss[loss=3.548, NarTop10Accuracy=0.6092, over 7009.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6448, over 6041.90 frames. ], batch size: 30, lr: 2.42e-03 +2024-08-06 14:44:47,200 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (6/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 14:44:57,974 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,102 INFO [trainer.py:765] (6/8) Epoch 35, batch 2300, train_loss[loss=3.238, NarTop10Accuracy=0.6624, over 5849.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6438, over 6064.06 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,820 INFO [trainer.py:765] (6/8) Epoch 35, batch 2400, train_loss[loss=3.169, NarTop10Accuracy=0.6883, over 5720.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6443, over 5865.25 frames. ], batch size: 8, lr: 2.41e-03 +2024-08-06 14:46:02,147 INFO [trainer.py:765] (6/8) Epoch 35, batch 2500, train_loss[loss=3.41, NarTop10Accuracy=0.6403, over 4981.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6495, over 5525.82 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,327 INFO [trainer.py:650] (6/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (6/8) Epoch 36, batch 100, train_loss[loss=3.2, NarTop10Accuracy=0.6678, over 7028.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6564, over 2375.42 frames. ], batch size: 30, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (6/8) Epoch 36, batch 200, train_loss[loss=3.23, NarTop10Accuracy=0.6694, over 7045.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.66, over 3870.24 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (6/8) Epoch 36, batch 300, train_loss[loss=3.058, NarTop10Accuracy=0.7055, over 7216.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6621, over 4667.76 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,814 INFO [trainer.py:765] (6/8) Epoch 36, batch 400, train_loss[loss=3.076, NarTop10Accuracy=0.697, over 5146.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6616, over 5125.24 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,587 INFO [trainer.py:765] (6/8) Epoch 36, batch 500, train_loss[loss=3.516, NarTop10Accuracy=0.6175, over 6127.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6611, over 5393.09 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,654 INFO [trainer.py:765] (6/8) Epoch 36, batch 600, train_loss[loss=3.269, NarTop10Accuracy=0.6726, over 5736.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6571, over 5668.22 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (6/8) Epoch 36, batch 700, train_loss[loss=3.321, NarTop10Accuracy=0.6603, over 5253.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6548, over 5739.88 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (6/8) Epoch 36, batch 800, train_loss[loss=3.418, NarTop10Accuracy=0.6312, over 4967.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6548, over 5790.83 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (6/8) Epoch 36, batch 900, train_loss[loss=3.211, NarTop10Accuracy=0.6751, over 6310.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6549, over 5814.15 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (6/8) Epoch 36, batch 1000, train_loss[loss=3.197, NarTop10Accuracy=0.679, over 6245.00 frames. ], tot_loss[loss=3.314, NarTop10Accuracy=0.6553, over 5907.63 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (6/8) Epoch 36, batch 1100, train_loss[loss=3.245, NarTop10Accuracy=0.6746, over 6820.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6516, over 5951.30 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (6/8) Epoch 36, batch 1200, train_loss[loss=3.313, NarTop10Accuracy=0.6531, over 6910.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6544, over 5939.89 frames. ], batch size: 30, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (6/8) Epoch 36, batch 1300, train_loss[loss=3.324, NarTop10Accuracy=0.6724, over 5044.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6531, over 6014.23 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (6/8) Epoch 36, batch 1400, train_loss[loss=3.083, NarTop10Accuracy=0.6884, over 6086.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6517, over 6027.78 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (6/8) Epoch 36, batch 1500, train_loss[loss=3.497, NarTop10Accuracy=0.6199, over 5736.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6493, over 5965.88 frames. ], batch size: 50, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (6/8) Epoch 36, batch 1600, train_loss[loss=3.218, NarTop10Accuracy=0.6893, over 7363.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5960.99 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,132 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (6/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,177 INFO [trainer.py:765] (6/8) Epoch 36, batch 1700, train_loss[loss=3.275, NarTop10Accuracy=0.6733, over 6532.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6477, over 5950.71 frames. ], batch size: 14, lr: 2.35e-03 +2024-08-06 14:56:53,759 INFO [trainer.py:765] (6/8) Epoch 36, batch 1800, train_loss[loss=3.45, NarTop10Accuracy=0.6327, over 7235.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6469, over 6009.34 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,336 INFO [trainer.py:765] (6/8) Epoch 36, batch 1900, train_loss[loss=3.565, NarTop10Accuracy=0.6022, over 6099.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6475, over 6027.52 frames. ], batch size: 48, lr: 2.35e-03 +2024-08-06 14:57:46,057 INFO [trainer.py:765] (6/8) Epoch 36, batch 2000, train_loss[loss=3.854, NarTop10Accuracy=0.5434, over 5991.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6458, over 6024.91 frames. ], batch size: 51, lr: 2.35e-03 +2024-08-06 14:58:11,405 INFO [trainer.py:765] (6/8) Epoch 36, batch 2100, train_loss[loss=3.216, NarTop10Accuracy=0.6694, over 4016.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6454, over 6002.71 frames. ], batch size: 4, lr: 2.35e-03 +2024-08-06 14:58:36,833 INFO [trainer.py:765] (6/8) Epoch 36, batch 2200, train_loss[loss=3.555, NarTop10Accuracy=0.6021, over 7174.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6447, over 6040.10 frames. ], batch size: 30, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (6/8) Epoch 36, batch 2300, train_loss[loss=3.304, NarTop10Accuracy=0.6588, over 5833.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6422, over 6066.66 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (6/8) Epoch 36, batch 2400, train_loss[loss=3.538, NarTop10Accuracy=0.6207, over 5063.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 5887.77 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (6/8) Epoch 36, batch 2500, train_loss[loss=3.453, NarTop10Accuracy=0.6267, over 5060.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6464, over 5545.66 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,930 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (6/8) Epoch 37, batch 100, train_loss[loss=3.21, NarTop10Accuracy=0.6804, over 7251.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6633, over 2363.52 frames. 
], batch size: 30, lr: 2.31e-03 +2024-08-06 15:01:44,098 INFO [trainer.py:765] (6/8) Epoch 37, batch 200, train_loss[loss=3.007, NarTop10Accuracy=0.7087, over 6894.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6638, over 3869.19 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,383 INFO [trainer.py:765] (6/8) Epoch 37, batch 300, train_loss[loss=3.101, NarTop10Accuracy=0.7042, over 7058.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6648, over 4659.86 frames. ], batch size: 22, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (6/8) Epoch 37, batch 400, train_loss[loss=3.079, NarTop10Accuracy=0.6914, over 5185.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6612, over 5123.30 frames. ], batch size: 7, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (6/8) Epoch 37, batch 500, train_loss[loss=3.038, NarTop10Accuracy=0.6996, over 6015.00 frames. ], tot_loss[loss=3.295, NarTop10Accuracy=0.6598, over 5388.99 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,033 INFO [trainer.py:765] (6/8) Epoch 37, batch 600, train_loss[loss=3.108, NarTop10Accuracy=0.6989, over 5947.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6581, over 5660.96 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,248 INFO [trainer.py:765] (6/8) Epoch 37, batch 700, train_loss[loss=3.242, NarTop10Accuracy=0.6788, over 5073.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.657, over 5734.96 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (6/8) Epoch 37, batch 800, train_loss[loss=3.527, NarTop10Accuracy=0.61, over 5657.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6544, over 5782.23 frames. ], batch size: 7, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (6/8) Epoch 37, batch 900, train_loss[loss=3.174, NarTop10Accuracy=0.6816, over 6385.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6544, over 5828.70 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (6/8) Epoch 37, batch 1000, train_loss[loss=3.212, NarTop10Accuracy=0.6819, over 6197.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 5938.81 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (6/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (6/8) Epoch 37, batch 1100, train_loss[loss=3.598, NarTop10Accuracy=0.601, over 6683.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6515, over 5962.30 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (6/8) Epoch 37, batch 1200, train_loss[loss=3.246, NarTop10Accuracy=0.6746, over 7447.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6538, over 5964.65 frames. ], batch size: 30, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (6/8) Epoch 37, batch 1300, train_loss[loss=3.211, NarTop10Accuracy=0.6709, over 5042.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6519, over 6028.66 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,879 INFO [trainer.py:765] (6/8) Epoch 37, batch 1400, train_loss[loss=3.046, NarTop10Accuracy=0.7083, over 6140.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6508, over 6038.44 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (6/8) Epoch 37, batch 1500, train_loss[loss=3.686, NarTop10Accuracy=0.5888, over 5822.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6482, over 5967.59 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:09:44,190 INFO [trainer.py:765] (6/8) Epoch 37, batch 1600, train_loss[loss=3.552, NarTop10Accuracy=0.6065, over 6973.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6479, over 5962.94 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (6/8) Epoch 37, batch 1700, train_loss[loss=3.292, NarTop10Accuracy=0.6618, over 6246.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6491, over 5941.32 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (6/8) Epoch 37, batch 1800, train_loss[loss=3.409, NarTop10Accuracy=0.6286, over 7153.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 6003.12 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (6/8) Epoch 37, batch 1900, train_loss[loss=3.434, NarTop10Accuracy=0.6427, over 5957.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6473, over 6044.00 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (6/8) Epoch 37, batch 2000, train_loss[loss=3.393, NarTop10Accuracy=0.6412, over 6005.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6455, over 6012.23 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (6/8) Epoch 37, batch 2100, train_loss[loss=3.335, NarTop10Accuracy=0.6422, over 3846.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 5991.67 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,311 INFO [trainer.py:765] (6/8) Epoch 37, batch 2200, train_loss[loss=3.323, NarTop10Accuracy=0.6549, over 7444.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6488, over 6034.72 frames. ], batch size: 30, lr: 2.28e-03 +2024-08-06 15:12:49,786 INFO [trainer.py:765] (6/8) Epoch 37, batch 2300, train_loss[loss=3.166, NarTop10Accuracy=0.6644, over 5770.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6484, over 6059.75 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (6/8) Epoch 37, batch 2400, train_loss[loss=3.1, NarTop10Accuracy=0.6957, over 5288.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.647, over 5873.90 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (6/8) Epoch 37, batch 2500, train_loss[loss=3.476, NarTop10Accuracy=0.6135, over 5068.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 5542.36 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,104 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (6/8) Epoch 38, batch 100, train_loss[loss=3.36, NarTop10Accuracy=0.639, over 7181.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6544, over 2371.07 frames. ], batch size: 30, lr: 2.25e-03 +2024-08-06 15:15:27,289 INFO [trainer.py:765] (6/8) Epoch 38, batch 200, train_loss[loss=3.351, NarTop10Accuracy=0.6449, over 6727.00 frames. ], tot_loss[loss=3.281, NarTop10Accuracy=0.6626, over 3872.28 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,281 INFO [trainer.py:765] (6/8) Epoch 38, batch 300, train_loss[loss=3.176, NarTop10Accuracy=0.6815, over 7180.00 frames. ], tot_loss[loss=3.273, NarTop10Accuracy=0.6645, over 4665.53 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,595 INFO [trainer.py:765] (6/8) Epoch 38, batch 400, train_loss[loss=3.193, NarTop10Accuracy=0.6831, over 5085.00 frames. ], tot_loss[loss=3.278, NarTop10Accuracy=0.6636, over 5132.20 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,258 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (6/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,479 INFO [trainer.py:765] (6/8) Epoch 38, batch 500, train_loss[loss=3.363, NarTop10Accuracy=0.6505, over 6157.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6651, over 5411.26 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (6/8) Epoch 38, batch 600, train_loss[loss=3.107, NarTop10Accuracy=0.7004, over 5907.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.663, over 5681.08 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (6/8) Epoch 38, batch 700, train_loss[loss=3.1, NarTop10Accuracy=0.6961, over 4934.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6593, over 5742.50 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (6/8) Epoch 38, batch 800, train_loss[loss=3.281, NarTop10Accuracy=0.6624, over 5088.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6601, over 5797.73 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,539 INFO [trainer.py:765] (6/8) Epoch 38, batch 900, train_loss[loss=3.559, NarTop10Accuracy=0.61, over 6306.00 frames. ], tot_loss[loss=3.308, NarTop10Accuracy=0.6569, over 5819.30 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (6/8) Epoch 38, batch 1000, train_loss[loss=3.422, NarTop10Accuracy=0.6321, over 6649.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.654, over 5908.33 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (6/8) Epoch 38, batch 1100, train_loss[loss=3.494, NarTop10Accuracy=0.622, over 6911.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6501, over 5954.43 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (6/8) Epoch 38, batch 1200, train_loss[loss=3.359, NarTop10Accuracy=0.6408, over 6941.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6499, over 5944.43 frames. ], batch size: 30, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (6/8) Epoch 38, batch 1300, train_loss[loss=3.221, NarTop10Accuracy=0.652, over 4257.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6535, over 6005.73 frames. ], batch size: 5, lr: 2.23e-03 +2024-08-06 15:22:29,467 INFO [trainer.py:765] (6/8) Epoch 38, batch 1400, train_loss[loss=3.15, NarTop10Accuracy=0.6883, over 6155.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6519, over 6038.74 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (6/8) Epoch 38, batch 1500, train_loss[loss=3.324, NarTop10Accuracy=0.6572, over 6138.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6494, over 5982.60 frames. ], batch size: 48, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (6/8) Epoch 38, batch 1600, train_loss[loss=3.587, NarTop10Accuracy=0.6157, over 7074.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.649, over 5959.95 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (6/8) Epoch 38, batch 1700, train_loss[loss=3.253, NarTop10Accuracy=0.6692, over 6170.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.649, over 5940.94 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,064 INFO [trainer.py:765] (6/8) Epoch 38, batch 1800, train_loss[loss=3.448, NarTop10Accuracy=0.633, over 7188.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6481, over 6003.24 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,673 INFO [trainer.py:765] (6/8) Epoch 38, batch 1900, train_loss[loss=3.381, NarTop10Accuracy=0.6509, over 5939.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6473, over 6031.08 frames. ], batch size: 52, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (6/8) Epoch 38, batch 2000, train_loss[loss=3.622, NarTop10Accuracy=0.5928, over 5959.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6463, over 6009.57 frames. ], batch size: 50, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (6/8) Epoch 38, batch 2100, train_loss[loss=3.036, NarTop10Accuracy=0.6701, over 4781.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6482, over 5988.77 frames. ], batch size: 5, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (6/8) Epoch 38, batch 2200, train_loss[loss=3.623, NarTop10Accuracy=0.5933, over 6767.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.6487, over 6034.87 frames. ], batch size: 30, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (6/8) Epoch 38, batch 2300, train_loss[loss=3.175, NarTop10Accuracy=0.6888, over 5784.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6464, over 6073.39 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (6/8) Epoch 38, batch 2400, train_loss[loss=3.19, NarTop10Accuracy=0.6843, over 5164.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6438, over 5875.80 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:27:33,590 INFO [trainer.py:811] (6/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 15:27:34,076 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,515 INFO [trainer.py:765] (6/8) Epoch 38, batch 2500, train_loss[loss=3.187, NarTop10Accuracy=0.6687, over 5047.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 5529.95 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,657 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (6/8) Epoch 39, batch 100, train_loss[loss=3.204, NarTop10Accuracy=0.6736, over 7110.00 frames. ], tot_loss[loss=3.254, NarTop10Accuracy=0.6684, over 2376.31 frames. 
], batch size: 30, lr: 2.19e-03 +2024-08-06 15:29:28,052 INFO [trainer.py:765] (6/8) Epoch 39, batch 200, train_loss[loss=3.636, NarTop10Accuracy=0.5854, over 6839.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6659, over 3885.78 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (6/8) Epoch 39, batch 300, train_loss[loss=3.195, NarTop10Accuracy=0.6839, over 7090.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6631, over 4678.41 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,993 INFO [trainer.py:765] (6/8) Epoch 39, batch 400, train_loss[loss=3.004, NarTop10Accuracy=0.7111, over 5068.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6628, over 5137.81 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (6/8) Epoch 39, batch 500, train_loss[loss=3.078, NarTop10Accuracy=0.6981, over 6172.00 frames. ], tot_loss[loss=3.287, NarTop10Accuracy=0.6619, over 5423.45 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (6/8) Epoch 39, batch 600, train_loss[loss=3.305, NarTop10Accuracy=0.6578, over 5727.00 frames. ], tot_loss[loss=3.286, NarTop10Accuracy=0.6623, over 5681.81 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (6/8) Epoch 39, batch 700, train_loss[loss=3.253, NarTop10Accuracy=0.675, over 4957.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6592, over 5752.20 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,166 INFO [trainer.py:765] (6/8) Epoch 39, batch 800, train_loss[loss=3.185, NarTop10Accuracy=0.6838, over 5069.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6592, over 5801.16 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (6/8) Epoch 39, batch 900, train_loss[loss=3.06, NarTop10Accuracy=0.7039, over 6559.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6587, over 5808.12 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (6/8) Epoch 39, batch 1000, train_loss[loss=3.103, NarTop10Accuracy=0.6879, over 6691.00 frames. ], tot_loss[loss=3.301, NarTop10Accuracy=0.6581, over 5911.85 frames. ], batch size: 14, lr: 2.18e-03 +2024-08-06 15:34:33,095 INFO [trainer.py:765] (6/8) Epoch 39, batch 1100, train_loss[loss=3.261, NarTop10Accuracy=0.6672, over 6692.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6553, over 5964.53 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,245 INFO [trainer.py:765] (6/8) Epoch 39, batch 1200, train_loss[loss=3.248, NarTop10Accuracy=0.6711, over 7237.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6552, over 5971.35 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (6/8) Epoch 39, batch 1300, train_loss[loss=3.411, NarTop10Accuracy=0.6382, over 5182.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6552, over 6027.17 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (6/8) Epoch 39, batch 1400, train_loss[loss=3.3, NarTop10Accuracy=0.6554, over 6213.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6543, over 6051.18 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (6/8) Epoch 39, batch 1500, train_loss[loss=3.325, NarTop10Accuracy=0.6625, over 6124.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6517, over 5996.04 frames. 
], batch size: 49, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (6/8) Epoch 39, batch 1600, train_loss[loss=3.383, NarTop10Accuracy=0.6481, over 7133.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6517, over 5958.11 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (6/8) Epoch 39, batch 1700, train_loss[loss=3.178, NarTop10Accuracy=0.679, over 6223.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6506, over 5939.49 frames. ], batch size: 13, lr: 2.17e-03 +2024-08-06 15:38:08,510 INFO [trainer.py:765] (6/8) Epoch 39, batch 1800, train_loss[loss=3.125, NarTop10Accuracy=0.6973, over 7056.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6507, over 6020.17 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (6/8) Epoch 39, batch 1900, train_loss[loss=3.307, NarTop10Accuracy=0.6656, over 6012.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6474, over 6045.30 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:38:37,990 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (6/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,262 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,226 INFO [trainer.py:765] (6/8) Epoch 39, batch 2000, train_loss[loss=3.494, NarTop10Accuracy=0.6235, over 6295.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6485, over 6008.80 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (6/8) Epoch 39, batch 2100, train_loss[loss=3.449, NarTop10Accuracy=0.6251, over 3941.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6484, over 5979.20 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (6/8) Epoch 39, batch 2200, train_loss[loss=3.52, NarTop10Accuracy=0.6183, over 7337.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6507, over 6016.57 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 15:40:27,497 INFO [trainer.py:765] (6/8) Epoch 39, batch 2300, train_loss[loss=3.002, NarTop10Accuracy=0.7216, over 5711.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6509, over 6054.90 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (6/8) Epoch 39, batch 2400, train_loss[loss=3.18, NarTop10Accuracy=0.6762, over 5716.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6486, over 5887.50 frames. ], batch size: 8, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (6/8) Epoch 39, batch 2500, train_loss[loss=3.622, NarTop10Accuracy=0.5944, over 4881.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6543, over 5537.60 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:36,849 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:42:35,253 INFO [trainer.py:765] (6/8) Epoch 40, batch 100, train_loss[loss=3.556, NarTop10Accuracy=0.6042, over 6980.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6533, over 2389.70 frames. ], batch size: 30, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (6/8) Epoch 40, batch 200, train_loss[loss=3.396, NarTop10Accuracy=0.6379, over 6682.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6611, over 3870.64 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (6/8) Epoch 40, batch 300, train_loss[loss=3.337, NarTop10Accuracy=0.6545, over 7173.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6632, over 4675.96 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:44:18,201 INFO [trainer.py:765] (6/8) Epoch 40, batch 400, train_loss[loss=3.027, NarTop10Accuracy=0.7051, over 5173.00 frames. ], tot_loss[loss=3.264, NarTop10Accuracy=0.6659, over 5136.07 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (6/8) Epoch 40, batch 500, train_loss[loss=3.142, NarTop10Accuracy=0.6927, over 6126.00 frames. ], tot_loss[loss=3.265, NarTop10Accuracy=0.6667, over 5417.66 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (6/8) Epoch 40, batch 600, train_loss[loss=3.471, NarTop10Accuracy=0.623, over 5848.00 frames. ], tot_loss[loss=3.285, NarTop10Accuracy=0.6627, over 5676.06 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (6/8) Epoch 40, batch 700, train_loss[loss=3.492, NarTop10Accuracy=0.6264, over 4929.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6587, over 5736.61 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (6/8) Epoch 40, batch 800, train_loss[loss=3.299, NarTop10Accuracy=0.6548, over 5101.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.658, over 5808.96 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,289 INFO [trainer.py:765] (6/8) Epoch 40, batch 900, train_loss[loss=3.27, NarTop10Accuracy=0.6717, over 6301.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.66, over 5842.75 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (6/8) Epoch 40, batch 1000, train_loss[loss=3.456, NarTop10Accuracy=0.6343, over 6304.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6566, over 5932.57 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:48:18,709 INFO [trainer.py:765] (6/8) Epoch 40, batch 1100, train_loss[loss=3.52, NarTop10Accuracy=0.6123, over 6882.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6546, over 5960.20 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,093 INFO [trainer.py:765] (6/8) Epoch 40, batch 1200, train_loss[loss=3.329, NarTop10Accuracy=0.6536, over 7397.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6547, over 5957.27 frames. ], batch size: 31, lr: 2.12e-03 +2024-08-06 15:49:29,782 INFO [trainer.py:765] (6/8) Epoch 40, batch 1300, train_loss[loss=3.503, NarTop10Accuracy=0.6169, over 5020.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6556, over 6015.05 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,245 INFO [trainer.py:803] (6/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (6/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (6/8) Maximum memory allocated so far is 30201MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (6/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (6/8) Epoch 40, batch 1400, train_loss[loss=3.219, NarTop10Accuracy=0.6809, over 6198.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6543, over 6044.11 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (6/8) Epoch 40, batch 1500, train_loss[loss=3.52, NarTop10Accuracy=0.6119, over 6290.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6539, over 5978.64 frames. ], batch size: 49, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (6/8) Epoch 40, batch 1600, train_loss[loss=3.309, NarTop10Accuracy=0.6594, over 6909.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6552, over 5954.92 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,571 INFO [trainer.py:765] (6/8) Epoch 40, batch 1700, train_loss[loss=3.234, NarTop10Accuracy=0.6733, over 6365.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.655, over 5936.38 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:52:07,236 INFO [trainer.py:765] (6/8) Epoch 40, batch 1800, train_loss[loss=3.411, NarTop10Accuracy=0.6316, over 7168.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6533, over 6003.12 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (6/8) Epoch 40, batch 1900, train_loss[loss=3.449, NarTop10Accuracy=0.631, over 6306.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6515, over 6044.38 frames. ], batch size: 48, lr: 2.11e-03 +2024-08-06 15:52:59,511 INFO [trainer.py:765] (6/8) Epoch 40, batch 2000, train_loss[loss=3.407, NarTop10Accuracy=0.6382, over 6670.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6518, over 6023.37 frames. ], batch size: 48, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (6/8) Epoch 40, batch 2100, train_loss[loss=3.382, NarTop10Accuracy=0.6526, over 4798.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6525, over 6001.67 frames. ], batch size: 5, lr: 2.11e-03 +2024-08-06 15:53:50,419 INFO [trainer.py:765] (6/8) Epoch 40, batch 2200, train_loss[loss=3.255, NarTop10Accuracy=0.6721, over 7244.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6522, over 6047.99 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,886 INFO [trainer.py:765] (6/8) Epoch 40, batch 2300, train_loss[loss=3.19, NarTop10Accuracy=0.6872, over 5776.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6513, over 6076.10 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,787 INFO [trainer.py:765] (6/8) Epoch 40, batch 2400, train_loss[loss=3.457, NarTop10Accuracy=0.618, over 5007.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 5879.08 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (6/8) Epoch 40, batch 2500, train_loss[loss=3.338, NarTop10Accuracy=0.6677, over 4997.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6547, over 5535.58 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,417 INFO [trainer.py:650] (6/8) Reaches end of dataloader. +2024-08-06 15:55:28,419 INFO [trainer.py:1069] (6/8) Done! 
diff --git a/libritts/log/log-train-2024-08-06-06-41-41-7 b/libritts/log/log-train-2024-08-06-06-41-41-7 new file mode 100644 index 0000000000000000000000000000000000000000..8ba2c5de40146d6c17f8be2a5414418a9b8c209b --- /dev/null +++ b/libritts/log/log-train-2024-08-06-06-41-41-7 @@ -0,0 +1,1260 @@ +2024-08-06 06:41:41,474 INFO [trainer.py:870] (7/8) Training started +2024-08-06 06:41:41,475 INFO [trainer.py:889] (7/8) Device: cuda:7 +2024-08-06 06:41:41,475 INFO [trainer.py:890] (7/8) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 100, 'reset_interval': 200, 'valid_interval': 2000, 'env_info': {'k2-version': '1.24.3', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '279b0c87015a615b81b147251814d737a548f397', 'k2-git-date': 'Wed May 24 22:24:09 2023', 'lhotse-version': '1.26.0', 'torch-version': '2.0.1+cu118', 'torch-cuda-available': True, 'torch-cuda-version': '11.8', 'python-version': '3.10', 'icefall-git-branch': 'main', 'icefall-git-sha1': '3e4fbb6-dirty', 'icefall-git-date': 'Tue Aug 6 06:30:45 2024', 'icefall-path': '/workspace/icefall_llm', 'k2-path': '/usr/local/lib/python3.10/dist-packages/k2/__init__.py', 'lhotse-path': '/usr/local/lib/python3.10/dist-packages/lhotse/__init__.py', 'hostname': '6865771', 'IP address': '0.104.195.107'}, 'world_size': 8, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 100, 'start_batch': 0, 'exp_dir': PosixPath('exp/valle'), 'optimizer_name': 'ScaledAdam', 'scheduler_name': 'Eden', 'base_lr': 0.03, 'warmup_steps': 200, 'seed': 42, 'inf_check': False, 'save_every_n': 1000, 'keep_last_k': 20, 'average_period': 0, 'accumulate_grad_steps': 2, 'dtype': 'float32', 'filter_min_duration': 0.5, 'filter_max_duration': 14.0, 'train_stage': 2, 'visualize': False, 'oom_check': False, 'model_name': 'valle', 'decoder_dim': 1024, 'nhead': 16, 'num_decoder_layers': 12, 'scale_factor': 1.0, 'norm_first': True, 'add_prenet': False, 'prefix_mode': 1, 'share_embedding': True, 'prepend_bos': False, 'num_quantizers': 8, 'scaling_xformers': False, 'manifest_dir': PosixPath('data/tokenized'), 'max_duration': 160, 'bucketing_sampler': True, 'num_buckets': 6, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 0.1, 'on_the_fly_feats': False, 'shuffle': True, 'buffer_size': 40000, 'shuffle_buffer_size': 100000, 'drop_last': False, 'return_cuts': True, 'num_workers': 8, 'enable_spec_aug': False, 'spec_aug_time_warp_factor': 80, 'input_strategy': 'PrecomputedFeatures', 'dataset': 'libritts', 'text_tokens': 'data/tokenized/unique_text_tokens.k2symbols', 'sampling_rate': 24000} +2024-08-06 06:41:41,475 INFO [trainer.py:892] (7/8) About to create model +2024-08-06 06:41:42,210 INFO [trainer.py:899] (7/8) Number of model parameters: 367386628 +2024-08-06 06:41:42,211 INFO [checkpoint.py:112] (7/8) Loading checkpoint from exp/valle/epoch-99.pt +2024-08-06 06:41:44,215 INFO [trainer.py:914] (7/8) Using DDP +2024-08-06 06:41:46,897 INFO [datamodule.py:427] (7/8) About to get train cuts +2024-08-06 06:41:46,900 INFO [datamodule.py:434] (7/8) About to get dev cuts +2024-08-06 06:41:46,901 INFO [datamodule.py:292] (7/8) Disable SpecAugment +2024-08-06 06:41:46,901 INFO [datamodule.py:294] (7/8) About to create train dataset +2024-08-06 06:41:46,903 INFO [datamodule.py:323] (7/8) Using DynamicBucketingSampler +2024-08-06 06:41:47,528 INFO [datamodule.py:344] (7/8) About to create train dataloader +2024-08-06 06:41:47,529 INFO [datamodule.py:367] (7/8) 
About to create dev dataset +2024-08-06 06:41:47,861 INFO [datamodule.py:388] (7/8) About to create dev dataloader +2024-08-06 06:42:36,135 INFO [trainer.py:765] (7/8) Epoch 1, batch 100, train_loss[loss=95.55, NarTop10Accuracy=0.01339, over 7047.00 frames. ], tot_loss[loss=80.98, NarTop10Accuracy=0.05369, over 2367.61 frames. ], batch size: 31, lr: 2.25e-02 +2024-08-06 06:43:05,818 INFO [trainer.py:765] (7/8) Epoch 1, batch 200, train_loss[loss=121.8, NarTop10Accuracy=0.02089, over 6822.00 frames. ], tot_loss[loss=98.88, NarTop10Accuracy=0.04476, over 3863.13 frames. ], batch size: 17, lr: 3.00e-02 +2024-08-06 06:43:33,848 INFO [trainer.py:765] (7/8) Epoch 1, batch 300, train_loss[loss=71.83, NarTop10Accuracy=0.02463, over 7051.00 frames. ], tot_loss[loss=86.88, NarTop10Accuracy=0.04603, over 4659.75 frames. ], batch size: 22, lr: 3.00e-02 +2024-08-06 06:44:05,250 INFO [trainer.py:765] (7/8) Epoch 1, batch 400, train_loss[loss=33.05, NarTop10Accuracy=0.06255, over 5060.00 frames. ], tot_loss[loss=67.85, NarTop10Accuracy=0.05027, over 5116.56 frames. ], batch size: 7, lr: 3.00e-02 +2024-08-06 06:44:33,444 INFO [trainer.py:765] (7/8) Epoch 1, batch 500, train_loss[loss=16.4, NarTop10Accuracy=0.02446, over 6107.00 frames. ], tot_loss[loss=48.65, NarTop10Accuracy=0.0559, over 5408.16 frames. ], batch size: 11, lr: 2.99e-02 +2024-08-06 06:45:02,923 INFO [trainer.py:765] (7/8) Epoch 1, batch 600, train_loss[loss=6.139, NarTop10Accuracy=0.1628, over 5781.00 frames. ], tot_loss[loss=33.31, NarTop10Accuracy=0.06243, over 5672.69 frames. ], batch size: 9, lr: 2.99e-02 +2024-08-06 06:45:40,480 INFO [trainer.py:765] (7/8) Epoch 1, batch 700, train_loss[loss=7.091, NarTop10Accuracy=0.1133, over 5057.00 frames. ], tot_loss[loss=23.51, NarTop10Accuracy=0.06896, over 5753.94 frames. ], batch size: 6, lr: 2.99e-02 +2024-08-06 06:46:09,662 INFO [trainer.py:765] (7/8) Epoch 1, batch 800, train_loss[loss=6.611, NarTop10Accuracy=0.1084, over 4875.00 frames. ], tot_loss[loss=17.53, NarTop10Accuracy=0.08081, over 5797.22 frames. ], batch size: 6, lr: 2.98e-02 +2024-08-06 06:46:37,732 INFO [trainer.py:765] (7/8) Epoch 1, batch 900, train_loss[loss=6.062, NarTop10Accuracy=0.1569, over 6267.00 frames. ], tot_loss[loss=13.02, NarTop10Accuracy=0.11, over 5824.67 frames. ], batch size: 13, lr: 2.98e-02 +2024-08-06 06:47:13,908 INFO [trainer.py:765] (7/8) Epoch 1, batch 1000, train_loss[loss=5.883, NarTop10Accuracy=0.1916, over 6391.00 frames. ], tot_loss[loss=10.17, NarTop10Accuracy=0.1363, over 5919.60 frames. ], batch size: 13, lr: 2.97e-02 +2024-08-06 06:47:47,141 INFO [trainer.py:765] (7/8) Epoch 1, batch 1100, train_loss[loss=5.479, NarTop10Accuracy=0.2076, over 6770.00 frames. ], tot_loss[loss=8.414, NarTop10Accuracy=0.1568, over 5946.04 frames. ], batch size: 17, lr: 2.96e-02 +2024-08-06 06:48:15,709 INFO [trainer.py:765] (7/8) Epoch 1, batch 1200, train_loss[loss=6.227, NarTop10Accuracy=0.1384, over 7364.00 frames. ], tot_loss[loss=7.308, NarTop10Accuracy=0.1742, over 5947.78 frames. ], batch size: 31, lr: 2.96e-02 +2024-08-06 06:48:47,234 INFO [trainer.py:765] (7/8) Epoch 1, batch 1300, train_loss[loss=5.561, NarTop10Accuracy=0.1834, over 5095.00 frames. ], tot_loss[loss=6.603, NarTop10Accuracy=0.1867, over 6034.33 frames. ], batch size: 6, lr: 2.95e-02 +2024-08-06 06:49:23,567 INFO [trainer.py:765] (7/8) Epoch 1, batch 1400, train_loss[loss=5.708, NarTop10Accuracy=0.1706, over 6091.00 frames. ], tot_loss[loss=6.188, NarTop10Accuracy=0.1929, over 6030.82 frames. 
], batch size: 11, lr: 2.94e-02 +2024-08-06 06:49:51,506 INFO [trainer.py:765] (7/8) Epoch 1, batch 1500, train_loss[loss=5.573, NarTop10Accuracy=0.1954, over 6060.00 frames. ], tot_loss[loss=5.925, NarTop10Accuracy=0.1991, over 5965.80 frames. ], batch size: 48, lr: 2.94e-02 +2024-08-06 06:50:19,162 INFO [trainer.py:765] (7/8) Epoch 1, batch 1600, train_loss[loss=5.469, NarTop10Accuracy=0.2068, over 7040.00 frames. ], tot_loss[loss=5.751, NarTop10Accuracy=0.2048, over 5930.24 frames. ], batch size: 22, lr: 2.93e-02 +2024-08-06 06:50:45,596 INFO [trainer.py:765] (7/8) Epoch 1, batch 1700, train_loss[loss=5.39, NarTop10Accuracy=0.2259, over 6202.00 frames. ], tot_loss[loss=5.641, NarTop10Accuracy=0.2094, over 5916.31 frames. ], batch size: 13, lr: 2.92e-02 +2024-08-06 06:51:11,954 INFO [trainer.py:765] (7/8) Epoch 1, batch 1800, train_loss[loss=5.455, NarTop10Accuracy=0.2183, over 6980.00 frames. ], tot_loss[loss=5.55, NarTop10Accuracy=0.216, over 5986.10 frames. ], batch size: 22, lr: 2.91e-02 +2024-08-06 06:51:38,223 INFO [trainer.py:765] (7/8) Epoch 1, batch 1900, train_loss[loss=5.677, NarTop10Accuracy=0.1806, over 6365.00 frames. ], tot_loss[loss=5.494, NarTop10Accuracy=0.2212, over 6034.39 frames. ], batch size: 49, lr: 2.90e-02 +2024-08-06 06:52:03,652 INFO [trainer.py:765] (7/8) Epoch 1, batch 2000, train_loss[loss=5.452, NarTop10Accuracy=0.2234, over 6190.00 frames. ], tot_loss[loss=5.44, NarTop10Accuracy=0.2277, over 6026.23 frames. ], batch size: 49, lr: 2.89e-02 +2024-08-06 06:52:03,653 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 06:52:13,994 INFO [trainer.py:811] (7/8) Epoch 1, validation: loss=5.351, NarTop10Accuracy=0.2423, over 1907754.00 frames. +2024-08-06 06:52:13,994 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 26718MB +2024-08-06 06:52:14,534 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 4.341e+01 2.262e+02 7.241e+02 2.074e+04 7.259e+05, threshold=1.448e+03, percent-clipped=0.0 +2024-08-06 06:52:39,585 INFO [trainer.py:765] (7/8) Epoch 1, batch 2100, train_loss[loss=5.372, NarTop10Accuracy=0.2343, over 3806.00 frames. ], tot_loss[loss=5.386, NarTop10Accuracy=0.237, over 5997.41 frames. ], batch size: 4, lr: 2.88e-02 +2024-08-06 06:53:05,354 INFO [trainer.py:765] (7/8) Epoch 1, batch 2200, train_loss[loss=5.184, NarTop10Accuracy=0.2664, over 7178.00 frames. ], tot_loss[loss=5.356, NarTop10Accuracy=0.2416, over 6042.83 frames. ], batch size: 30, lr: 2.87e-02 +2024-08-06 06:53:30,701 INFO [trainer.py:765] (7/8) Epoch 1, batch 2300, train_loss[loss=5.426, NarTop10Accuracy=0.2417, over 5728.00 frames. ], tot_loss[loss=5.337, NarTop10Accuracy=0.2452, over 6057.30 frames. ], batch size: 9, lr: 2.86e-02 +2024-08-06 06:53:55,358 INFO [trainer.py:765] (7/8) Epoch 1, batch 2400, train_loss[loss=4.882, NarTop10Accuracy=0.3201, over 5224.00 frames. ], tot_loss[loss=5.315, NarTop10Accuracy=0.2498, over 5887.12 frames. ], batch size: 7, lr: 2.85e-02 +2024-08-06 06:54:18,659 INFO [trainer.py:765] (7/8) Epoch 1, batch 2500, train_loss[loss=5.412, NarTop10Accuracy=0.2393, over 4971.00 frames. ], tot_loss[loss=5.268, NarTop10Accuracy=0.2583, over 5536.51 frames. ], batch size: 6, lr: 2.84e-02 +2024-08-06 06:54:40,250 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 06:55:37,937 INFO [trainer.py:765] (7/8) Epoch 2, batch 100, train_loss[loss=5.304, NarTop10Accuracy=0.2584, over 7173.00 frames. ], tot_loss[loss=5.173, NarTop10Accuracy=0.2811, over 2381.48 frames. 
], batch size: 30, lr: 2.77e-02 +2024-08-06 06:56:16,406 INFO [trainer.py:765] (7/8) Epoch 2, batch 200, train_loss[loss=5.061, NarTop10Accuracy=0.3081, over 6902.00 frames. ], tot_loss[loss=5.138, NarTop10Accuracy=0.2867, over 3876.43 frames. ], batch size: 17, lr: 2.76e-02 +2024-08-06 06:56:44,973 INFO [trainer.py:765] (7/8) Epoch 2, batch 300, train_loss[loss=5.295, NarTop10Accuracy=0.2661, over 7102.00 frames. ], tot_loss[loss=5.145, NarTop10Accuracy=0.2859, over 4673.46 frames. ], batch size: 22, lr: 2.75e-02 +2024-08-06 06:57:13,939 INFO [trainer.py:765] (7/8) Epoch 2, batch 400, train_loss[loss=5.378, NarTop10Accuracy=0.2266, over 5041.00 frames. ], tot_loss[loss=5.132, NarTop10Accuracy=0.2885, over 5123.45 frames. ], batch size: 7, lr: 2.74e-02 +2024-08-06 06:57:56,209 INFO [trainer.py:765] (7/8) Epoch 2, batch 500, train_loss[loss=4.855, NarTop10Accuracy=0.336, over 6191.00 frames. ], tot_loss[loss=5.109, NarTop10Accuracy=0.2925, over 5402.71 frames. ], batch size: 11, lr: 2.73e-02 +2024-08-06 06:58:25,426 INFO [trainer.py:765] (7/8) Epoch 2, batch 600, train_loss[loss=4.97, NarTop10Accuracy=0.3251, over 5849.00 frames. ], tot_loss[loss=5.083, NarTop10Accuracy=0.298, over 5679.46 frames. ], batch size: 9, lr: 2.71e-02 +2024-08-06 06:58:55,283 INFO [trainer.py:765] (7/8) Epoch 2, batch 700, train_loss[loss=4.984, NarTop10Accuracy=0.3221, over 5035.00 frames. ], tot_loss[loss=5.074, NarTop10Accuracy=0.2995, over 5747.92 frames. ], batch size: 6, lr: 2.70e-02 +2024-08-06 06:59:31,890 INFO [trainer.py:765] (7/8) Epoch 2, batch 800, train_loss[loss=4.997, NarTop10Accuracy=0.3057, over 5050.00 frames. ], tot_loss[loss=5.077, NarTop10Accuracy=0.2987, over 5813.47 frames. ], batch size: 6, lr: 2.69e-02 +2024-08-06 07:00:03,184 INFO [trainer.py:765] (7/8) Epoch 2, batch 900, train_loss[loss=5.518, NarTop10Accuracy=0.1978, over 6182.00 frames. ], tot_loss[loss=5.059, NarTop10Accuracy=0.3026, over 5813.31 frames. ], batch size: 13, lr: 2.68e-02 +2024-08-06 07:00:33,142 INFO [trainer.py:765] (7/8) Epoch 2, batch 1000, train_loss[loss=4.897, NarTop10Accuracy=0.3294, over 6546.00 frames. ], tot_loss[loss=5.027, NarTop10Accuracy=0.3096, over 5922.63 frames. ], batch size: 14, lr: 2.66e-02 +2024-08-06 07:01:05,574 INFO [trainer.py:765] (7/8) Epoch 2, batch 1100, train_loss[loss=4.88, NarTop10Accuracy=0.3401, over 6804.00 frames. ], tot_loss[loss=5.016, NarTop10Accuracy=0.311, over 5954.68 frames. ], batch size: 17, lr: 2.65e-02 +2024-08-06 07:01:46,285 INFO [trainer.py:765] (7/8) Epoch 2, batch 1200, train_loss[loss=4.817, NarTop10Accuracy=0.3482, over 6867.00 frames. ], tot_loss[loss=5.001, NarTop10Accuracy=0.3132, over 5960.12 frames. ], batch size: 30, lr: 2.64e-02 +2024-08-06 07:02:15,646 INFO [trainer.py:765] (7/8) Epoch 2, batch 1300, train_loss[loss=5.505, NarTop10Accuracy=0.2072, over 5014.00 frames. ], tot_loss[loss=4.959, NarTop10Accuracy=0.3214, over 6027.27 frames. ], batch size: 6, lr: 2.63e-02 +2024-08-06 07:02:45,252 INFO [trainer.py:765] (7/8) Epoch 2, batch 1400, train_loss[loss=4.673, NarTop10Accuracy=0.3661, over 6124.00 frames. ], tot_loss[loss=4.936, NarTop10Accuracy=0.3257, over 6054.08 frames. ], batch size: 11, lr: 2.61e-02 +2024-08-06 07:02:50,267 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:03:02,094 INFO [trainer.py:811] (7/8) Epoch 2, validation: loss=4.943, NarTop10Accuracy=0.3266, over 1907754.00 frames. 
+2024-08-06 07:03:02,095 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27258MB +2024-08-06 07:03:02,638 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 5.429e+01 1.166e+02 1.425e+02 1.750e+02 6.435e+02, threshold=2.851e+02, percent-clipped=0.0 +2024-08-06 07:03:25,471 INFO [trainer.py:765] (7/8) Epoch 2, batch 1500, train_loss[loss=5.162, NarTop10Accuracy=0.2849, over 5964.00 frames. ], tot_loss[loss=4.934, NarTop10Accuracy=0.3263, over 5982.15 frames. ], batch size: 49, lr: 2.60e-02 +2024-08-06 07:03:53,553 INFO [trainer.py:765] (7/8) Epoch 2, batch 1600, train_loss[loss=4.712, NarTop10Accuracy=0.369, over 7233.00 frames. ], tot_loss[loss=4.915, NarTop10Accuracy=0.3303, over 5958.42 frames. ], batch size: 22, lr: 2.59e-02 +2024-08-06 07:04:20,313 INFO [trainer.py:765] (7/8) Epoch 2, batch 1700, train_loss[loss=4.594, NarTop10Accuracy=0.3891, over 6664.00 frames. ], tot_loss[loss=4.917, NarTop10Accuracy=0.3304, over 5948.45 frames. ], batch size: 14, lr: 2.58e-02 +2024-08-06 07:04:46,888 INFO [trainer.py:765] (7/8) Epoch 2, batch 1800, train_loss[loss=4.754, NarTop10Accuracy=0.3637, over 7243.00 frames. ], tot_loss[loss=4.907, NarTop10Accuracy=0.3322, over 6002.94 frames. ], batch size: 22, lr: 2.56e-02 +2024-08-06 07:05:13,586 INFO [trainer.py:765] (7/8) Epoch 2, batch 1900, train_loss[loss=4.823, NarTop10Accuracy=0.3515, over 6555.00 frames. ], tot_loss[loss=4.89, NarTop10Accuracy=0.336, over 6060.02 frames. ], batch size: 50, lr: 2.55e-02 +2024-08-06 07:05:39,285 INFO [trainer.py:765] (7/8) Epoch 2, batch 2000, train_loss[loss=4.828, NarTop10Accuracy=0.347, over 6306.00 frames. ], tot_loss[loss=4.861, NarTop10Accuracy=0.3413, over 6022.72 frames. ], batch size: 49, lr: 2.54e-02 +2024-08-06 07:06:04,829 INFO [trainer.py:765] (7/8) Epoch 2, batch 2100, train_loss[loss=4.541, NarTop10Accuracy=0.4211, over 3866.00 frames. ], tot_loss[loss=4.866, NarTop10Accuracy=0.3404, over 5996.60 frames. ], batch size: 4, lr: 2.52e-02 +2024-08-06 07:06:30,373 INFO [trainer.py:765] (7/8) Epoch 2, batch 2200, train_loss[loss=4.588, NarTop10Accuracy=0.3947, over 7457.00 frames. ], tot_loss[loss=4.818, NarTop10Accuracy=0.3502, over 6026.86 frames. ], batch size: 31, lr: 2.51e-02 +2024-08-06 07:06:55,874 INFO [trainer.py:765] (7/8) Epoch 2, batch 2300, train_loss[loss=4.699, NarTop10Accuracy=0.3713, over 5858.00 frames. ], tot_loss[loss=4.813, NarTop10Accuracy=0.3518, over 6041.35 frames. ], batch size: 9, lr: 2.50e-02 +2024-08-06 07:07:20,576 INFO [trainer.py:765] (7/8) Epoch 2, batch 2400, train_loss[loss=4.601, NarTop10Accuracy=0.3828, over 5815.00 frames. ], tot_loss[loss=4.789, NarTop10Accuracy=0.3562, over 5870.12 frames. ], batch size: 8, lr: 2.49e-02 +2024-08-06 07:07:47,111 INFO [trainer.py:765] (7/8) Epoch 2, batch 2500, train_loss[loss=4.766, NarTop10Accuracy=0.3614, over 4966.00 frames. ], tot_loss[loss=4.751, NarTop10Accuracy=0.3638, over 5520.04 frames. ], batch size: 6, lr: 2.47e-02 +2024-08-06 07:08:08,305 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 07:09:08,538 INFO [trainer.py:765] (7/8) Epoch 3, batch 100, train_loss[loss=4.985, NarTop10Accuracy=0.3241, over 7128.00 frames. ], tot_loss[loss=4.642, NarTop10Accuracy=0.3869, over 2379.07 frames. ], batch size: 30, lr: 2.35e-02 +2024-08-06 07:09:41,500 INFO [trainer.py:765] (7/8) Epoch 3, batch 200, train_loss[loss=4.39, NarTop10Accuracy=0.445, over 7129.00 frames. ], tot_loss[loss=4.613, NarTop10Accuracy=0.3925, over 3878.85 frames. 
], batch size: 18, lr: 2.34e-02 +2024-08-06 07:10:16,976 INFO [trainer.py:765] (7/8) Epoch 3, batch 300, train_loss[loss=4.528, NarTop10Accuracy=0.419, over 7180.00 frames. ], tot_loss[loss=4.596, NarTop10Accuracy=0.3945, over 4690.66 frames. ], batch size: 22, lr: 2.33e-02 +2024-08-06 07:10:49,792 INFO [trainer.py:765] (7/8) Epoch 3, batch 400, train_loss[loss=4.54, NarTop10Accuracy=0.4043, over 5080.00 frames. ], tot_loss[loss=4.574, NarTop10Accuracy=0.3988, over 5141.40 frames. ], batch size: 7, lr: 2.32e-02 +2024-08-06 07:11:18,179 INFO [trainer.py:765] (7/8) Epoch 3, batch 500, train_loss[loss=4.87, NarTop10Accuracy=0.3496, over 6179.00 frames. ], tot_loss[loss=4.566, NarTop10Accuracy=0.4005, over 5411.29 frames. ], batch size: 11, lr: 2.31e-02 +2024-08-06 07:11:51,262 INFO [trainer.py:765] (7/8) Epoch 3, batch 600, train_loss[loss=4.538, NarTop10Accuracy=0.4065, over 5667.00 frames. ], tot_loss[loss=4.556, NarTop10Accuracy=0.4024, over 5673.68 frames. ], batch size: 9, lr: 2.30e-02 +2024-08-06 07:12:32,101 INFO [trainer.py:765] (7/8) Epoch 3, batch 700, train_loss[loss=4.574, NarTop10Accuracy=0.3809, over 5023.00 frames. ], tot_loss[loss=4.542, NarTop10Accuracy=0.4051, over 5740.76 frames. ], batch size: 6, lr: 2.29e-02 +2024-08-06 07:13:01,919 INFO [trainer.py:765] (7/8) Epoch 3, batch 800, train_loss[loss=4.358, NarTop10Accuracy=0.4342, over 5065.00 frames. ], tot_loss[loss=4.531, NarTop10Accuracy=0.4068, over 5806.32 frames. ], batch size: 6, lr: 2.27e-02 +2024-08-06 07:13:12,668 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:13:22,883 INFO [trainer.py:811] (7/8) Epoch 3, validation: loss=4.43, NarTop10Accuracy=0.4285, over 1907754.00 frames. +2024-08-06 07:13:22,884 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 27258MB +2024-08-06 07:13:23,429 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 6.823e+01 1.318e+02 1.583e+02 1.978e+02 8.364e+02, threshold=3.166e+02, percent-clipped=5.2 +2024-08-06 07:13:42,435 INFO [trainer.py:765] (7/8) Epoch 3, batch 900, train_loss[loss=4.392, NarTop10Accuracy=0.4346, over 6197.00 frames. ], tot_loss[loss=4.515, NarTop10Accuracy=0.4098, over 5814.88 frames. ], batch size: 13, lr: 2.26e-02 +2024-08-06 07:14:25,627 INFO [trainer.py:765] (7/8) Epoch 3, batch 1000, train_loss[loss=4.378, NarTop10Accuracy=0.4395, over 6632.00 frames. ], tot_loss[loss=4.497, NarTop10Accuracy=0.4136, over 5928.97 frames. ], batch size: 14, lr: 2.25e-02 +2024-08-06 07:14:56,325 INFO [trainer.py:765] (7/8) Epoch 3, batch 1100, train_loss[loss=4.364, NarTop10Accuracy=0.4405, over 6858.00 frames. ], tot_loss[loss=4.476, NarTop10Accuracy=0.4175, over 5948.00 frames. ], batch size: 17, lr: 2.24e-02 +2024-08-06 07:15:29,867 INFO [trainer.py:765] (7/8) Epoch 3, batch 1200, train_loss[loss=4.467, NarTop10Accuracy=0.426, over 7398.00 frames. ], tot_loss[loss=4.473, NarTop10Accuracy=0.4183, over 5950.21 frames. ], batch size: 30, lr: 2.23e-02 +2024-08-06 07:16:12,665 INFO [trainer.py:765] (7/8) Epoch 3, batch 1300, train_loss[loss=4.388, NarTop10Accuracy=0.4267, over 5082.00 frames. ], tot_loss[loss=4.464, NarTop10Accuracy=0.4201, over 6015.51 frames. ], batch size: 6, lr: 2.22e-02 +2024-08-06 07:16:42,204 INFO [trainer.py:765] (7/8) Epoch 3, batch 1400, train_loss[loss=4.184, NarTop10Accuracy=0.4692, over 6190.00 frames. ], tot_loss[loss=4.45, NarTop10Accuracy=0.4225, over 6028.38 frames. 
], batch size: 11, lr: 2.21e-02 +2024-08-06 07:17:10,664 INFO [trainer.py:765] (7/8) Epoch 3, batch 1500, train_loss[loss=4.713, NarTop10Accuracy=0.3733, over 6046.00 frames. ], tot_loss[loss=4.443, NarTop10Accuracy=0.4243, over 5966.98 frames. ], batch size: 49, lr: 2.20e-02 +2024-08-06 07:17:38,769 INFO [trainer.py:765] (7/8) Epoch 3, batch 1600, train_loss[loss=4.347, NarTop10Accuracy=0.4444, over 7020.00 frames. ], tot_loss[loss=4.43, NarTop10Accuracy=0.4265, over 5955.61 frames. ], batch size: 22, lr: 2.19e-02 +2024-08-06 07:18:05,504 INFO [trainer.py:765] (7/8) Epoch 3, batch 1700, train_loss[loss=4.245, NarTop10Accuracy=0.4627, over 6239.00 frames. ], tot_loss[loss=4.404, NarTop10Accuracy=0.4317, over 5940.90 frames. ], batch size: 13, lr: 2.18e-02 +2024-08-06 07:18:32,161 INFO [trainer.py:765] (7/8) Epoch 3, batch 1800, train_loss[loss=4.269, NarTop10Accuracy=0.4603, over 7115.00 frames. ], tot_loss[loss=4.386, NarTop10Accuracy=0.4347, over 6013.03 frames. ], batch size: 22, lr: 2.17e-02 +2024-08-06 07:19:01,959 INFO [trainer.py:765] (7/8) Epoch 3, batch 1900, train_loss[loss=4.579, NarTop10Accuracy=0.4111, over 6031.00 frames. ], tot_loss[loss=4.373, NarTop10Accuracy=0.4373, over 6024.38 frames. ], batch size: 50, lr: 2.16e-02 +2024-08-06 07:19:27,622 INFO [trainer.py:765] (7/8) Epoch 3, batch 2000, train_loss[loss=4.471, NarTop10Accuracy=0.4184, over 6344.00 frames. ], tot_loss[loss=4.349, NarTop10Accuracy=0.4418, over 6016.17 frames. ], batch size: 48, lr: 2.15e-02 +2024-08-06 07:19:53,071 INFO [trainer.py:765] (7/8) Epoch 3, batch 2100, train_loss[loss=4.338, NarTop10Accuracy=0.4443, over 4931.00 frames. ], tot_loss[loss=4.322, NarTop10Accuracy=0.4474, over 6013.21 frames. ], batch size: 5, lr: 2.14e-02 +2024-08-06 07:20:18,554 INFO [trainer.py:765] (7/8) Epoch 3, batch 2200, train_loss[loss=4.499, NarTop10Accuracy=0.4076, over 7273.00 frames. ], tot_loss[loss=4.31, NarTop10Accuracy=0.4496, over 6044.34 frames. ], batch size: 31, lr: 2.13e-02 +2024-08-06 07:20:44,051 INFO [trainer.py:765] (7/8) Epoch 3, batch 2300, train_loss[loss=4.128, NarTop10Accuracy=0.4821, over 5852.00 frames. ], tot_loss[loss=4.318, NarTop10Accuracy=0.448, over 6070.19 frames. ], batch size: 9, lr: 2.12e-02 +2024-08-06 07:21:08,678 INFO [trainer.py:765] (7/8) Epoch 3, batch 2400, train_loss[loss=4.067, NarTop10Accuracy=0.5078, over 5224.00 frames. ], tot_loss[loss=4.315, NarTop10Accuracy=0.4491, over 5887.60 frames. ], batch size: 7, lr: 2.11e-02 +2024-08-06 07:21:32,172 INFO [trainer.py:765] (7/8) Epoch 3, batch 2500, train_loss[loss=4.317, NarTop10Accuracy=0.4515, over 4989.00 frames. ], tot_loss[loss=4.263, NarTop10Accuracy=0.459, over 5549.61 frames. ], batch size: 6, lr: 2.10e-02 +2024-08-06 07:21:53,066 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 07:23:00,977 INFO [trainer.py:765] (7/8) Epoch 4, batch 100, train_loss[loss=4.08, NarTop10Accuracy=0.4939, over 7327.00 frames. ], tot_loss[loss=4.185, NarTop10Accuracy=0.4757, over 2374.51 frames. ], batch size: 31, lr: 1.97e-02 +2024-08-06 07:23:33,302 INFO [trainer.py:765] (7/8) Epoch 4, batch 200, train_loss[loss=4.13, NarTop10Accuracy=0.4895, over 6757.00 frames. ], tot_loss[loss=4.184, NarTop10Accuracy=0.4764, over 3863.88 frames. ], batch size: 17, lr: 1.96e-02 +2024-08-06 07:23:51,465 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:24:01,517 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=4.035, NarTop10Accuracy=0.5085, over 1907754.00 frames. 
+2024-08-06 07:24:01,517 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29812MB +2024-08-06 07:24:02,097 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 9.910e+01 1.530e+02 1.750e+02 2.064e+02 5.317e+02, threshold=3.500e+02, percent-clipped=3.3 +2024-08-06 07:24:14,362 INFO [trainer.py:765] (7/8) Epoch 4, batch 300, train_loss[loss=4.075, NarTop10Accuracy=0.4981, over 7173.00 frames. ], tot_loss[loss=4.171, NarTop10Accuracy=0.4794, over 4676.22 frames. ], batch size: 22, lr: 1.95e-02 +2024-08-06 07:24:53,596 INFO [trainer.py:765] (7/8) Epoch 4, batch 400, train_loss[loss=3.835, NarTop10Accuracy=0.5546, over 5075.00 frames. ], tot_loss[loss=4.177, NarTop10Accuracy=0.4774, over 5113.79 frames. ], batch size: 7, lr: 1.94e-02 +2024-08-06 07:25:25,294 INFO [trainer.py:765] (7/8) Epoch 4, batch 500, train_loss[loss=4.167, NarTop10Accuracy=0.4657, over 6203.00 frames. ], tot_loss[loss=4.169, NarTop10Accuracy=0.479, over 5400.08 frames. ], batch size: 11, lr: 1.93e-02 +2024-08-06 07:25:56,975 INFO [trainer.py:765] (7/8) Epoch 4, batch 600, train_loss[loss=4.253, NarTop10Accuracy=0.4621, over 5762.00 frames. ], tot_loss[loss=4.152, NarTop10Accuracy=0.4818, over 5678.14 frames. ], batch size: 9, lr: 1.92e-02 +2024-08-06 07:26:37,606 INFO [trainer.py:765] (7/8) Epoch 4, batch 700, train_loss[loss=4.29, NarTop10Accuracy=0.4601, over 5147.00 frames. ], tot_loss[loss=4.154, NarTop10Accuracy=0.4815, over 5736.75 frames. ], batch size: 6, lr: 1.92e-02 +2024-08-06 07:27:07,432 INFO [trainer.py:765] (7/8) Epoch 4, batch 800, train_loss[loss=4.269, NarTop10Accuracy=0.4523, over 4980.00 frames. ], tot_loss[loss=4.146, NarTop10Accuracy=0.4834, over 5798.51 frames. ], batch size: 6, lr: 1.91e-02 +2024-08-06 07:27:42,041 INFO [trainer.py:765] (7/8) Epoch 4, batch 900, train_loss[loss=4.106, NarTop10Accuracy=0.4945, over 6261.00 frames. ], tot_loss[loss=4.107, NarTop10Accuracy=0.491, over 5843.24 frames. ], batch size: 13, lr: 1.90e-02 +2024-08-06 07:28:20,670 INFO [trainer.py:765] (7/8) Epoch 4, batch 1000, train_loss[loss=4.141, NarTop10Accuracy=0.5033, over 6243.00 frames. ], tot_loss[loss=4.106, NarTop10Accuracy=0.4915, over 5930.43 frames. ], batch size: 13, lr: 1.89e-02 +2024-08-06 07:28:54,070 INFO [trainer.py:765] (7/8) Epoch 4, batch 1100, train_loss[loss=3.874, NarTop10Accuracy=0.5416, over 6730.00 frames. ], tot_loss[loss=4.108, NarTop10Accuracy=0.4912, over 5970.19 frames. ], batch size: 17, lr: 1.88e-02 +2024-08-06 07:29:29,598 INFO [trainer.py:765] (7/8) Epoch 4, batch 1200, train_loss[loss=4.373, NarTop10Accuracy=0.4389, over 7215.00 frames. ], tot_loss[loss=4.101, NarTop10Accuracy=0.4927, over 5960.19 frames. ], batch size: 30, lr: 1.87e-02 +2024-08-06 07:30:04,991 INFO [trainer.py:765] (7/8) Epoch 4, batch 1300, train_loss[loss=3.651, NarTop10Accuracy=0.5735, over 4361.00 frames. ], tot_loss[loss=4.072, NarTop10Accuracy=0.498, over 6014.04 frames. ], batch size: 5, lr: 1.87e-02 +2024-08-06 07:30:43,379 INFO [trainer.py:765] (7/8) Epoch 4, batch 1400, train_loss[loss=4.086, NarTop10Accuracy=0.4994, over 6207.00 frames. ], tot_loss[loss=4.072, NarTop10Accuracy=0.498, over 6007.50 frames. ], batch size: 11, lr: 1.86e-02 +2024-08-06 07:31:11,831 INFO [trainer.py:765] (7/8) Epoch 4, batch 1500, train_loss[loss=3.995, NarTop10Accuracy=0.5169, over 6574.00 frames. ], tot_loss[loss=4.073, NarTop10Accuracy=0.4979, over 5945.61 frames. 
], batch size: 49, lr: 1.85e-02 +2024-08-06 07:31:39,960 INFO [trainer.py:765] (7/8) Epoch 4, batch 1600, train_loss[loss=4.074, NarTop10Accuracy=0.4962, over 7026.00 frames. ], tot_loss[loss=4.071, NarTop10Accuracy=0.4983, over 5957.67 frames. ], batch size: 22, lr: 1.84e-02 +2024-08-06 07:32:06,854 INFO [trainer.py:765] (7/8) Epoch 4, batch 1700, train_loss[loss=4.437, NarTop10Accuracy=0.4337, over 6286.00 frames. ], tot_loss[loss=4.055, NarTop10Accuracy=0.5021, over 5931.01 frames. ], batch size: 13, lr: 1.84e-02 +2024-08-06 07:32:33,483 INFO [trainer.py:765] (7/8) Epoch 4, batch 1800, train_loss[loss=4.169, NarTop10Accuracy=0.4852, over 7066.00 frames. ], tot_loss[loss=4.036, NarTop10Accuracy=0.5053, over 6012.30 frames. ], batch size: 22, lr: 1.83e-02 +2024-08-06 07:33:00,194 INFO [trainer.py:765] (7/8) Epoch 4, batch 1900, train_loss[loss=4.368, NarTop10Accuracy=0.439, over 6574.00 frames. ], tot_loss[loss=4.055, NarTop10Accuracy=0.5019, over 6033.83 frames. ], batch size: 49, lr: 1.82e-02 +2024-08-06 07:33:25,990 INFO [trainer.py:765] (7/8) Epoch 4, batch 2000, train_loss[loss=4.502, NarTop10Accuracy=0.4138, over 6283.00 frames. ], tot_loss[loss=4.043, NarTop10Accuracy=0.5042, over 6007.79 frames. ], batch size: 50, lr: 1.81e-02 +2024-08-06 07:33:51,512 INFO [trainer.py:765] (7/8) Epoch 4, batch 2100, train_loss[loss=3.548, NarTop10Accuracy=0.6004, over 4043.00 frames. ], tot_loss[loss=4.026, NarTop10Accuracy=0.5077, over 5986.22 frames. ], batch size: 4, lr: 1.81e-02 +2024-08-06 07:34:16,906 INFO [trainer.py:765] (7/8) Epoch 4, batch 2200, train_loss[loss=4.184, NarTop10Accuracy=0.4836, over 7296.00 frames. ], tot_loss[loss=4.028, NarTop10Accuracy=0.5071, over 6015.50 frames. ], batch size: 31, lr: 1.80e-02 +2024-08-06 07:34:31,431 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:34:41,462 INFO [trainer.py:811] (7/8) Epoch 4, validation: loss=3.858, NarTop10Accuracy=0.5445, over 1907754.00 frames. +2024-08-06 07:34:41,463 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29812MB +2024-08-06 07:34:41,980 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.230e+02 1.721e+02 1.919e+02 2.225e+02 9.682e+02, threshold=3.839e+02, percent-clipped=2.3 +2024-08-06 07:34:52,441 INFO [trainer.py:765] (7/8) Epoch 4, batch 2300, train_loss[loss=3.731, NarTop10Accuracy=0.5612, over 5733.00 frames. ], tot_loss[loss=4.028, NarTop10Accuracy=0.5077, over 6041.66 frames. ], batch size: 9, lr: 1.79e-02 +2024-08-06 07:35:17,166 INFO [trainer.py:765] (7/8) Epoch 4, batch 2400, train_loss[loss=3.804, NarTop10Accuracy=0.5512, over 5263.00 frames. ], tot_loss[loss=4.02, NarTop10Accuracy=0.5094, over 5844.65 frames. ], batch size: 7, lr: 1.78e-02 +2024-08-06 07:35:40,627 INFO [trainer.py:765] (7/8) Epoch 4, batch 2500, train_loss[loss=4.35, NarTop10Accuracy=0.4388, over 5086.00 frames. ], tot_loss[loss=4.011, NarTop10Accuracy=0.5109, over 5511.42 frames. ], batch size: 6, lr: 1.78e-02 +2024-08-06 07:36:01,683 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 07:37:02,524 INFO [trainer.py:765] (7/8) Epoch 5, batch 100, train_loss[loss=3.896, NarTop10Accuracy=0.5333, over 7456.00 frames. ], tot_loss[loss=3.953, NarTop10Accuracy=0.5248, over 2369.15 frames. ], batch size: 31, lr: 1.66e-02 +2024-08-06 07:37:39,814 INFO [trainer.py:765] (7/8) Epoch 5, batch 200, train_loss[loss=4.072, NarTop10Accuracy=0.4942, over 6916.00 frames. ], tot_loss[loss=3.937, NarTop10Accuracy=0.5271, over 3869.63 frames. 
], batch size: 17, lr: 1.65e-02 +2024-08-06 07:38:13,471 INFO [trainer.py:765] (7/8) Epoch 5, batch 300, train_loss[loss=4.165, NarTop10Accuracy=0.4827, over 7109.00 frames. ], tot_loss[loss=3.918, NarTop10Accuracy=0.5312, over 4663.95 frames. ], batch size: 22, lr: 1.65e-02 +2024-08-06 07:38:42,429 INFO [trainer.py:765] (7/8) Epoch 5, batch 400, train_loss[loss=3.998, NarTop10Accuracy=0.5111, over 5194.00 frames. ], tot_loss[loss=3.919, NarTop10Accuracy=0.5305, over 5123.34 frames. ], batch size: 7, lr: 1.64e-02 +2024-08-06 07:39:17,020 INFO [trainer.py:765] (7/8) Epoch 5, batch 500, train_loss[loss=3.856, NarTop10Accuracy=0.5491, over 6229.00 frames. ], tot_loss[loss=3.928, NarTop10Accuracy=0.5292, over 5403.17 frames. ], batch size: 11, lr: 1.63e-02 +2024-08-06 07:39:51,943 INFO [trainer.py:765] (7/8) Epoch 5, batch 600, train_loss[loss=3.941, NarTop10Accuracy=0.5231, over 5734.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5326, over 5660.64 frames. ], batch size: 9, lr: 1.63e-02 +2024-08-06 07:40:28,626 INFO [trainer.py:765] (7/8) Epoch 5, batch 700, train_loss[loss=3.761, NarTop10Accuracy=0.558, over 5006.00 frames. ], tot_loss[loss=3.91, NarTop10Accuracy=0.5329, over 5733.54 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:02,366 INFO [trainer.py:765] (7/8) Epoch 5, batch 800, train_loss[loss=4.067, NarTop10Accuracy=0.4919, over 5170.00 frames. ], tot_loss[loss=3.909, NarTop10Accuracy=0.5326, over 5800.67 frames. ], batch size: 6, lr: 1.62e-02 +2024-08-06 07:41:37,937 INFO [trainer.py:765] (7/8) Epoch 5, batch 900, train_loss[loss=4.137, NarTop10Accuracy=0.4796, over 6678.00 frames. ], tot_loss[loss=3.899, NarTop10Accuracy=0.5344, over 5819.36 frames. ], batch size: 14, lr: 1.61e-02 +2024-08-06 07:42:13,846 INFO [trainer.py:765] (7/8) Epoch 5, batch 1000, train_loss[loss=3.949, NarTop10Accuracy=0.5284, over 6713.00 frames. ], tot_loss[loss=3.891, NarTop10Accuracy=0.5358, over 5905.95 frames. ], batch size: 14, lr: 1.60e-02 +2024-08-06 07:42:46,468 INFO [trainer.py:765] (7/8) Epoch 5, batch 1100, train_loss[loss=3.886, NarTop10Accuracy=0.5328, over 6824.00 frames. ], tot_loss[loss=3.894, NarTop10Accuracy=0.5347, over 5964.76 frames. ], batch size: 17, lr: 1.60e-02 +2024-08-06 07:43:25,225 INFO [trainer.py:765] (7/8) Epoch 5, batch 1200, train_loss[loss=4.109, NarTop10Accuracy=0.4808, over 7115.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.534, over 5944.17 frames. ], batch size: 30, lr: 1.59e-02 +2024-08-06 07:44:00,556 INFO [trainer.py:765] (7/8) Epoch 5, batch 1300, train_loss[loss=3.727, NarTop10Accuracy=0.5467, over 5047.00 frames. ], tot_loss[loss=3.894, NarTop10Accuracy=0.5349, over 6009.55 frames. ], batch size: 6, lr: 1.59e-02 +2024-08-06 07:44:30,238 INFO [trainer.py:765] (7/8) Epoch 5, batch 1400, train_loss[loss=3.608, NarTop10Accuracy=0.5822, over 6168.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5355, over 6037.83 frames. ], batch size: 11, lr: 1.58e-02 +2024-08-06 07:45:02,845 INFO [trainer.py:765] (7/8) Epoch 5, batch 1500, train_loss[loss=3.829, NarTop10Accuracy=0.5504, over 5976.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5346, over 5973.82 frames. ], batch size: 48, lr: 1.57e-02 +2024-08-06 07:45:31,008 INFO [trainer.py:765] (7/8) Epoch 5, batch 1600, train_loss[loss=4.14, NarTop10Accuracy=0.4912, over 7106.00 frames. ], tot_loss[loss=3.905, NarTop10Accuracy=0.5336, over 5938.07 frames. 
], batch size: 22, lr: 1.57e-02 +2024-08-06 07:45:51,057 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:46:01,621 INFO [trainer.py:811] (7/8) Epoch 5, validation: loss=3.749, NarTop10Accuracy=0.5672, over 1907754.00 frames. +2024-08-06 07:46:01,622 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 29812MB +2024-08-06 07:46:02,123 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.160e+02 1.669e+02 1.884e+02 2.190e+02 6.243e+02, threshold=3.768e+02, percent-clipped=1.8 +2024-08-06 07:46:08,362 INFO [trainer.py:765] (7/8) Epoch 5, batch 1700, train_loss[loss=3.751, NarTop10Accuracy=0.5712, over 6291.00 frames. ], tot_loss[loss=3.892, NarTop10Accuracy=0.5361, over 5941.99 frames. ], batch size: 13, lr: 1.56e-02 +2024-08-06 07:46:34,967 INFO [trainer.py:765] (7/8) Epoch 5, batch 1800, train_loss[loss=3.88, NarTop10Accuracy=0.5412, over 7119.00 frames. ], tot_loss[loss=3.887, NarTop10Accuracy=0.5371, over 6012.40 frames. ], batch size: 22, lr: 1.56e-02 +2024-08-06 07:47:01,490 INFO [trainer.py:765] (7/8) Epoch 5, batch 1900, train_loss[loss=3.952, NarTop10Accuracy=0.5361, over 6162.00 frames. ], tot_loss[loss=3.9, NarTop10Accuracy=0.5347, over 6044.25 frames. ], batch size: 48, lr: 1.55e-02 +2024-08-06 07:47:27,147 INFO [trainer.py:765] (7/8) Epoch 5, batch 2000, train_loss[loss=3.789, NarTop10Accuracy=0.5567, over 6156.00 frames. ], tot_loss[loss=3.895, NarTop10Accuracy=0.5355, over 6021.95 frames. ], batch size: 50, lr: 1.55e-02 +2024-08-06 07:47:52,619 INFO [trainer.py:765] (7/8) Epoch 5, batch 2100, train_loss[loss=4.122, NarTop10Accuracy=0.4867, over 3881.00 frames. ], tot_loss[loss=3.896, NarTop10Accuracy=0.5348, over 5998.50 frames. ], batch size: 4, lr: 1.54e-02 +2024-08-06 07:48:17,993 INFO [trainer.py:765] (7/8) Epoch 5, batch 2200, train_loss[loss=3.941, NarTop10Accuracy=0.5278, over 7287.00 frames. ], tot_loss[loss=3.886, NarTop10Accuracy=0.5371, over 6046.26 frames. ], batch size: 30, lr: 1.54e-02 +2024-08-06 07:48:43,422 INFO [trainer.py:765] (7/8) Epoch 5, batch 2300, train_loss[loss=3.808, NarTop10Accuracy=0.5442, over 5840.00 frames. ], tot_loss[loss=3.897, NarTop10Accuracy=0.5359, over 6065.24 frames. ], batch size: 9, lr: 1.53e-02 +2024-08-06 07:49:08,170 INFO [trainer.py:765] (7/8) Epoch 5, batch 2400, train_loss[loss=3.759, NarTop10Accuracy=0.5711, over 5789.00 frames. ], tot_loss[loss=3.89, NarTop10Accuracy=0.537, over 5874.74 frames. ], batch size: 8, lr: 1.53e-02 +2024-08-06 07:49:31,645 INFO [trainer.py:765] (7/8) Epoch 5, batch 2500, train_loss[loss=3.804, NarTop10Accuracy=0.5494, over 5021.00 frames. ], tot_loss[loss=3.849, NarTop10Accuracy=0.5451, over 5543.48 frames. ], batch size: 6, lr: 1.52e-02 +2024-08-06 07:49:53,636 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 07:50:58,969 INFO [trainer.py:765] (7/8) Epoch 6, batch 100, train_loss[loss=3.658, NarTop10Accuracy=0.5838, over 7086.00 frames. ], tot_loss[loss=3.797, NarTop10Accuracy=0.5566, over 2373.36 frames. ], batch size: 30, lr: 1.42e-02 +2024-08-06 07:51:31,789 INFO [trainer.py:765] (7/8) Epoch 6, batch 200, train_loss[loss=3.585, NarTop10Accuracy=0.6041, over 6847.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5567, over 3880.62 frames. ], batch size: 17, lr: 1.42e-02 +2024-08-06 07:52:04,696 INFO [trainer.py:765] (7/8) Epoch 6, batch 300, train_loss[loss=3.643, NarTop10Accuracy=0.5907, over 7353.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5566, over 4677.91 frames. 
], batch size: 22, lr: 1.41e-02 +2024-08-06 07:52:36,200 INFO [trainer.py:765] (7/8) Epoch 6, batch 400, train_loss[loss=3.739, NarTop10Accuracy=0.5632, over 5246.00 frames. ], tot_loss[loss=3.785, NarTop10Accuracy=0.5584, over 5126.71 frames. ], batch size: 7, lr: 1.41e-02 +2024-08-06 07:53:06,102 INFO [trainer.py:765] (7/8) Epoch 6, batch 500, train_loss[loss=3.874, NarTop10Accuracy=0.5446, over 6087.00 frames. ], tot_loss[loss=3.767, NarTop10Accuracy=0.562, over 5407.60 frames. ], batch size: 11, lr: 1.40e-02 +2024-08-06 07:53:43,286 INFO [trainer.py:765] (7/8) Epoch 6, batch 600, train_loss[loss=3.602, NarTop10Accuracy=0.5951, over 5826.00 frames. ], tot_loss[loss=3.778, NarTop10Accuracy=0.5601, over 5685.21 frames. ], batch size: 9, lr: 1.40e-02 +2024-08-06 07:54:15,439 INFO [trainer.py:765] (7/8) Epoch 6, batch 700, train_loss[loss=3.825, NarTop10Accuracy=0.5488, over 5158.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.557, over 5741.38 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:54:49,526 INFO [trainer.py:765] (7/8) Epoch 6, batch 800, train_loss[loss=3.69, NarTop10Accuracy=0.5703, over 5125.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5552, over 5795.00 frames. ], batch size: 6, lr: 1.39e-02 +2024-08-06 07:55:21,984 INFO [trainer.py:765] (7/8) Epoch 6, batch 900, train_loss[loss=3.447, NarTop10Accuracy=0.6226, over 6304.00 frames. ], tot_loss[loss=3.795, NarTop10Accuracy=0.5568, over 5810.47 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:00,804 INFO [trainer.py:765] (7/8) Epoch 6, batch 1000, train_loss[loss=3.684, NarTop10Accuracy=0.581, over 6328.00 frames. ], tot_loss[loss=3.81, NarTop10Accuracy=0.5534, over 5911.90 frames. ], batch size: 13, lr: 1.38e-02 +2024-08-06 07:56:34,171 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 07:56:44,742 INFO [trainer.py:811] (7/8) Epoch 6, validation: loss=3.634, NarTop10Accuracy=0.5919, over 1907754.00 frames. +2024-08-06 07:56:44,743 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 07:56:45,276 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.300e+02 1.714e+02 1.918e+02 2.211e+02 6.360e+02, threshold=3.836e+02, percent-clipped=1.6 +2024-08-06 07:56:46,639 INFO [trainer.py:765] (7/8) Epoch 6, batch 1100, train_loss[loss=3.641, NarTop10Accuracy=0.5945, over 7089.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5549, over 5948.59 frames. ], batch size: 18, lr: 1.37e-02 +2024-08-06 07:57:24,888 INFO [trainer.py:765] (7/8) Epoch 6, batch 1200, train_loss[loss=3.969, NarTop10Accuracy=0.5237, over 7049.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5559, over 5934.26 frames. ], batch size: 31, lr: 1.37e-02 +2024-08-06 07:57:56,612 INFO [trainer.py:765] (7/8) Epoch 6, batch 1300, train_loss[loss=3.396, NarTop10Accuracy=0.6285, over 5035.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.556, over 6016.93 frames. ], batch size: 6, lr: 1.37e-02 +2024-08-06 07:58:30,736 INFO [trainer.py:765] (7/8) Epoch 6, batch 1400, train_loss[loss=3.948, NarTop10Accuracy=0.5121, over 6212.00 frames. ], tot_loss[loss=3.8, NarTop10Accuracy=0.5548, over 6032.90 frames. ], batch size: 11, lr: 1.36e-02 +2024-08-06 07:59:00,998 INFO [trainer.py:765] (7/8) Epoch 6, batch 1500, train_loss[loss=4.076, NarTop10Accuracy=0.5032, over 6221.00 frames. ], tot_loss[loss=3.803, NarTop10Accuracy=0.5545, over 5970.17 frames. 
], batch size: 49, lr: 1.36e-02 +2024-08-06 07:59:28,933 INFO [trainer.py:765] (7/8) Epoch 6, batch 1600, train_loss[loss=3.798, NarTop10Accuracy=0.5621, over 7216.00 frames. ], tot_loss[loss=3.792, NarTop10Accuracy=0.5569, over 5962.67 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 07:59:55,617 INFO [trainer.py:765] (7/8) Epoch 6, batch 1700, train_loss[loss=3.812, NarTop10Accuracy=0.5498, over 6274.00 frames. ], tot_loss[loss=3.794, NarTop10Accuracy=0.556, over 5950.91 frames. ], batch size: 13, lr: 1.35e-02 +2024-08-06 08:00:22,187 INFO [trainer.py:765] (7/8) Epoch 6, batch 1800, train_loss[loss=3.641, NarTop10Accuracy=0.6005, over 7211.00 frames. ], tot_loss[loss=3.79, NarTop10Accuracy=0.5571, over 6019.67 frames. ], batch size: 22, lr: 1.35e-02 +2024-08-06 08:00:48,794 INFO [trainer.py:765] (7/8) Epoch 6, batch 1900, train_loss[loss=4.073, NarTop10Accuracy=0.5025, over 6343.00 frames. ], tot_loss[loss=3.82, NarTop10Accuracy=0.5511, over 6062.80 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:14,461 INFO [trainer.py:765] (7/8) Epoch 6, batch 2000, train_loss[loss=3.973, NarTop10Accuracy=0.5187, over 6041.00 frames. ], tot_loss[loss=3.808, NarTop10Accuracy=0.5537, over 6023.28 frames. ], batch size: 49, lr: 1.34e-02 +2024-08-06 08:01:43,134 INFO [trainer.py:765] (7/8) Epoch 6, batch 2100, train_loss[loss=3.785, NarTop10Accuracy=0.5659, over 4767.00 frames. ], tot_loss[loss=3.799, NarTop10Accuracy=0.5553, over 6010.85 frames. ], batch size: 5, lr: 1.33e-02 +2024-08-06 08:02:08,518 INFO [trainer.py:765] (7/8) Epoch 6, batch 2200, train_loss[loss=3.736, NarTop10Accuracy=0.5696, over 6987.00 frames. ], tot_loss[loss=3.805, NarTop10Accuracy=0.5534, over 6044.40 frames. ], batch size: 30, lr: 1.33e-02 +2024-08-06 08:02:33,916 INFO [trainer.py:765] (7/8) Epoch 6, batch 2300, train_loss[loss=3.465, NarTop10Accuracy=0.6283, over 5687.00 frames. ], tot_loss[loss=3.796, NarTop10Accuracy=0.5557, over 6056.31 frames. ], batch size: 9, lr: 1.33e-02 +2024-08-06 08:02:58,616 INFO [trainer.py:765] (7/8) Epoch 6, batch 2400, train_loss[loss=3.674, NarTop10Accuracy=0.5813, over 5778.00 frames. ], tot_loss[loss=3.802, NarTop10Accuracy=0.5551, over 5877.35 frames. ], batch size: 8, lr: 1.32e-02 +2024-08-06 08:03:21,939 INFO [trainer.py:765] (7/8) Epoch 6, batch 2500, train_loss[loss=4.258, NarTop10Accuracy=0.4589, over 4967.00 frames. ], tot_loss[loss=3.776, NarTop10Accuracy=0.5601, over 5529.54 frames. ], batch size: 6, lr: 1.32e-02 +2024-08-06 08:03:42,819 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:04:42,817 INFO [trainer.py:765] (7/8) Epoch 7, batch 100, train_loss[loss=3.806, NarTop10Accuracy=0.5501, over 7191.00 frames. ], tot_loss[loss=3.682, NarTop10Accuracy=0.5819, over 2370.80 frames. ], batch size: 30, lr: 1.23e-02 +2024-08-06 08:05:18,347 INFO [trainer.py:765] (7/8) Epoch 7, batch 200, train_loss[loss=3.923, NarTop10Accuracy=0.536, over 6722.00 frames. ], tot_loss[loss=3.707, NarTop10Accuracy=0.5758, over 3866.97 frames. ], batch size: 17, lr: 1.23e-02 +2024-08-06 08:05:46,773 INFO [trainer.py:765] (7/8) Epoch 7, batch 300, train_loss[loss=3.577, NarTop10Accuracy=0.5995, over 7224.00 frames. ], tot_loss[loss=3.716, NarTop10Accuracy=0.5737, over 4659.06 frames. ], batch size: 22, lr: 1.23e-02 +2024-08-06 08:06:22,091 INFO [trainer.py:765] (7/8) Epoch 7, batch 400, train_loss[loss=3.84, NarTop10Accuracy=0.5417, over 5065.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5736, over 5119.69 frames. 
], batch size: 7, lr: 1.22e-02 +2024-08-06 08:06:52,315 INFO [trainer.py:765] (7/8) Epoch 7, batch 500, train_loss[loss=3.747, NarTop10Accuracy=0.5722, over 6187.00 frames. ], tot_loss[loss=3.71, NarTop10Accuracy=0.5742, over 5389.99 frames. ], batch size: 11, lr: 1.22e-02 +2024-08-06 08:06:56,086 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:07:06,251 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=3.56, NarTop10Accuracy=0.6069, over 1907754.00 frames. +2024-08-06 08:07:06,252 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 08:07:06,837 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 1.760e+02 1.958e+02 2.227e+02 5.399e+02, threshold=3.916e+02, percent-clipped=0.8 +2024-08-06 08:07:33,151 INFO [trainer.py:765] (7/8) Epoch 7, batch 600, train_loss[loss=3.758, NarTop10Accuracy=0.5638, over 5817.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5735, over 5666.74 frames. ], batch size: 9, lr: 1.22e-02 +2024-08-06 08:08:11,332 INFO [trainer.py:765] (7/8) Epoch 7, batch 700, train_loss[loss=3.586, NarTop10Accuracy=0.6023, over 5026.00 frames. ], tot_loss[loss=3.717, NarTop10Accuracy=0.5729, over 5725.17 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:08:45,557 INFO [trainer.py:765] (7/8) Epoch 7, batch 800, train_loss[loss=3.489, NarTop10Accuracy=0.6108, over 4911.00 frames. ], tot_loss[loss=3.705, NarTop10Accuracy=0.5755, over 5783.17 frames. ], batch size: 6, lr: 1.21e-02 +2024-08-06 08:09:17,738 INFO [trainer.py:765] (7/8) Epoch 7, batch 900, train_loss[loss=3.73, NarTop10Accuracy=0.5565, over 6189.00 frames. ], tot_loss[loss=3.712, NarTop10Accuracy=0.5736, over 5813.42 frames. ], batch size: 13, lr: 1.21e-02 +2024-08-06 08:09:54,191 INFO [trainer.py:765] (7/8) Epoch 7, batch 1000, train_loss[loss=4.066, NarTop10Accuracy=0.4962, over 6198.00 frames. ], tot_loss[loss=3.719, NarTop10Accuracy=0.5716, over 5918.43 frames. ], batch size: 13, lr: 1.20e-02 +2024-08-06 08:10:29,570 INFO [trainer.py:765] (7/8) Epoch 7, batch 1100, train_loss[loss=3.774, NarTop10Accuracy=0.5623, over 6818.00 frames. ], tot_loss[loss=3.721, NarTop10Accuracy=0.5713, over 5939.49 frames. ], batch size: 17, lr: 1.20e-02 +2024-08-06 08:11:02,491 INFO [trainer.py:765] (7/8) Epoch 7, batch 1200, train_loss[loss=3.883, NarTop10Accuracy=0.5321, over 7122.00 frames. ], tot_loss[loss=3.722, NarTop10Accuracy=0.571, over 5929.66 frames. ], batch size: 30, lr: 1.20e-02 +2024-08-06 08:11:33,447 INFO [trainer.py:765] (7/8) Epoch 7, batch 1300, train_loss[loss=3.513, NarTop10Accuracy=0.6112, over 5031.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5714, over 6017.50 frames. ], batch size: 6, lr: 1.19e-02 +2024-08-06 08:12:10,912 INFO [trainer.py:765] (7/8) Epoch 7, batch 1400, train_loss[loss=3.722, NarTop10Accuracy=0.5768, over 6044.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5707, over 6029.10 frames. ], batch size: 11, lr: 1.19e-02 +2024-08-06 08:12:42,109 INFO [trainer.py:765] (7/8) Epoch 7, batch 1500, train_loss[loss=3.699, NarTop10Accuracy=0.5721, over 5986.00 frames. ], tot_loss[loss=3.715, NarTop10Accuracy=0.5723, over 5961.05 frames. ], batch size: 49, lr: 1.19e-02 +2024-08-06 08:13:13,237 INFO [trainer.py:765] (7/8) Epoch 7, batch 1600, train_loss[loss=3.451, NarTop10Accuracy=0.6278, over 7097.00 frames. ], tot_loss[loss=3.713, NarTop10Accuracy=0.5733, over 5939.40 frames. 
], batch size: 22, lr: 1.18e-02 +2024-08-06 08:13:40,016 INFO [trainer.py:765] (7/8) Epoch 7, batch 1700, train_loss[loss=3.781, NarTop10Accuracy=0.562, over 6226.00 frames. ], tot_loss[loss=3.725, NarTop10Accuracy=0.5704, over 5926.32 frames. ], batch size: 13, lr: 1.18e-02 +2024-08-06 08:14:06,583 INFO [trainer.py:765] (7/8) Epoch 7, batch 1800, train_loss[loss=3.818, NarTop10Accuracy=0.5495, over 7275.00 frames. ], tot_loss[loss=3.723, NarTop10Accuracy=0.5709, over 5987.58 frames. ], batch size: 22, lr: 1.18e-02 +2024-08-06 08:14:33,223 INFO [trainer.py:765] (7/8) Epoch 7, batch 1900, train_loss[loss=4.042, NarTop10Accuracy=0.5026, over 6017.00 frames. ], tot_loss[loss=3.731, NarTop10Accuracy=0.5695, over 6036.99 frames. ], batch size: 48, lr: 1.17e-02 +2024-08-06 08:14:58,994 INFO [trainer.py:765] (7/8) Epoch 7, batch 2000, train_loss[loss=3.731, NarTop10Accuracy=0.5758, over 6014.00 frames. ], tot_loss[loss=3.724, NarTop10Accuracy=0.5708, over 6014.75 frames. ], batch size: 49, lr: 1.17e-02 +2024-08-06 08:15:24,423 INFO [trainer.py:765] (7/8) Epoch 7, batch 2100, train_loss[loss=4.092, NarTop10Accuracy=0.4895, over 3895.00 frames. ], tot_loss[loss=3.727, NarTop10Accuracy=0.5703, over 5996.93 frames. ], batch size: 4, lr: 1.17e-02 +2024-08-06 08:15:49,960 INFO [trainer.py:765] (7/8) Epoch 7, batch 2200, train_loss[loss=4.048, NarTop10Accuracy=0.5083, over 7238.00 frames. ], tot_loss[loss=3.73, NarTop10Accuracy=0.5694, over 6040.01 frames. ], batch size: 30, lr: 1.17e-02 +2024-08-06 08:16:15,489 INFO [trainer.py:765] (7/8) Epoch 7, batch 2300, train_loss[loss=3.924, NarTop10Accuracy=0.5368, over 5695.00 frames. ], tot_loss[loss=3.741, NarTop10Accuracy=0.5674, over 6065.94 frames. ], batch size: 9, lr: 1.16e-02 +2024-08-06 08:16:40,319 INFO [trainer.py:765] (7/8) Epoch 7, batch 2400, train_loss[loss=3.62, NarTop10Accuracy=0.5938, over 5211.00 frames. ], tot_loss[loss=3.736, NarTop10Accuracy=0.5681, over 5868.95 frames. ], batch size: 7, lr: 1.16e-02 +2024-08-06 08:17:03,739 INFO [trainer.py:765] (7/8) Epoch 7, batch 2500, train_loss[loss=3.559, NarTop10Accuracy=0.5981, over 4988.00 frames. ], tot_loss[loss=3.72, NarTop10Accuracy=0.5714, over 5534.23 frames. ], batch size: 6, lr: 1.16e-02 +2024-08-06 08:17:06,843 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:17:17,433 INFO [trainer.py:811] (7/8) Epoch 7, validation: loss=3.591, NarTop10Accuracy=0.6002, over 1907754.00 frames. +2024-08-06 08:17:17,433 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 08:17:17,902 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 1.794e+02 1.981e+02 2.246e+02 4.644e+02, threshold=3.962e+02, percent-clipped=1.0 +2024-08-06 08:17:35,187 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:18:36,193 INFO [trainer.py:765] (7/8) Epoch 8, batch 100, train_loss[loss=3.67, NarTop10Accuracy=0.5792, over 7733.00 frames. ], tot_loss[loss=3.657, NarTop10Accuracy=0.5866, over 2374.33 frames. ], batch size: 32, lr: 1.09e-02 +2024-08-06 08:19:15,020 INFO [trainer.py:765] (7/8) Epoch 8, batch 200, train_loss[loss=3.565, NarTop10Accuracy=0.6086, over 6789.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5831, over 3861.31 frames. ], batch size: 17, lr: 1.09e-02 +2024-08-06 08:19:43,561 INFO [trainer.py:765] (7/8) Epoch 8, batch 300, train_loss[loss=3.605, NarTop10Accuracy=0.5929, over 7225.00 frames. ], tot_loss[loss=3.67, NarTop10Accuracy=0.5839, over 4675.88 frames. 
], batch size: 22, lr: 1.08e-02 +2024-08-06 08:20:16,269 INFO [trainer.py:765] (7/8) Epoch 8, batch 400, train_loss[loss=3.505, NarTop10Accuracy=0.6092, over 5022.00 frames. ], tot_loss[loss=3.66, NarTop10Accuracy=0.5856, over 5137.24 frames. ], batch size: 7, lr: 1.08e-02 +2024-08-06 08:20:48,421 INFO [trainer.py:765] (7/8) Epoch 8, batch 500, train_loss[loss=3.55, NarTop10Accuracy=0.6112, over 6254.00 frames. ], tot_loss[loss=3.656, NarTop10Accuracy=0.5861, over 5405.74 frames. ], batch size: 11, lr: 1.08e-02 +2024-08-06 08:21:23,737 INFO [trainer.py:765] (7/8) Epoch 8, batch 600, train_loss[loss=3.819, NarTop10Accuracy=0.5626, over 5731.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5828, over 5674.13 frames. ], batch size: 9, lr: 1.07e-02 +2024-08-06 08:21:57,607 INFO [trainer.py:765] (7/8) Epoch 8, batch 700, train_loss[loss=3.912, NarTop10Accuracy=0.5296, over 5106.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5821, over 5757.89 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:22:27,341 INFO [trainer.py:765] (7/8) Epoch 8, batch 800, train_loss[loss=3.477, NarTop10Accuracy=0.6236, over 4958.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5818, over 5795.57 frames. ], batch size: 6, lr: 1.07e-02 +2024-08-06 08:23:06,892 INFO [trainer.py:765] (7/8) Epoch 8, batch 900, train_loss[loss=3.508, NarTop10Accuracy=0.613, over 6239.00 frames. ], tot_loss[loss=3.664, NarTop10Accuracy=0.5834, over 5810.42 frames. ], batch size: 13, lr: 1.07e-02 +2024-08-06 08:23:42,943 INFO [trainer.py:765] (7/8) Epoch 8, batch 1000, train_loss[loss=3.584, NarTop10Accuracy=0.598, over 6326.00 frames. ], tot_loss[loss=3.661, NarTop10Accuracy=0.584, over 5913.95 frames. ], batch size: 13, lr: 1.06e-02 +2024-08-06 08:24:15,105 INFO [trainer.py:765] (7/8) Epoch 8, batch 1100, train_loss[loss=3.719, NarTop10Accuracy=0.5742, over 6747.00 frames. ], tot_loss[loss=3.666, NarTop10Accuracy=0.5832, over 5947.22 frames. ], batch size: 17, lr: 1.06e-02 +2024-08-06 08:24:57,340 INFO [trainer.py:765] (7/8) Epoch 8, batch 1200, train_loss[loss=3.68, NarTop10Accuracy=0.5838, over 7155.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.582, over 5947.60 frames. ], batch size: 30, lr: 1.06e-02 +2024-08-06 08:25:26,604 INFO [trainer.py:765] (7/8) Epoch 8, batch 1300, train_loss[loss=3.739, NarTop10Accuracy=0.5714, over 5046.00 frames. ], tot_loss[loss=3.657, NarTop10Accuracy=0.5846, over 6014.80 frames. ], batch size: 6, lr: 1.06e-02 +2024-08-06 08:26:00,605 INFO [trainer.py:765] (7/8) Epoch 8, batch 1400, train_loss[loss=3.794, NarTop10Accuracy=0.5612, over 6176.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5816, over 6041.91 frames. ], batch size: 11, lr: 1.05e-02 +2024-08-06 08:26:28,987 INFO [trainer.py:765] (7/8) Epoch 8, batch 1500, train_loss[loss=3.631, NarTop10Accuracy=0.5902, over 6336.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5814, over 5972.08 frames. ], batch size: 50, lr: 1.05e-02 +2024-08-06 08:26:56,933 INFO [trainer.py:765] (7/8) Epoch 8, batch 1600, train_loss[loss=3.814, NarTop10Accuracy=0.5491, over 6923.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5818, over 5959.53 frames. ], batch size: 22, lr: 1.05e-02 +2024-08-06 08:27:23,763 INFO [trainer.py:765] (7/8) Epoch 8, batch 1700, train_loss[loss=3.399, NarTop10Accuracy=0.6283, over 6245.00 frames. ], tot_loss[loss=3.674, NarTop10Accuracy=0.5813, over 5932.74 frames. 
], batch size: 13, lr: 1.05e-02 +2024-08-06 08:27:50,462 INFO [trainer.py:765] (7/8) Epoch 8, batch 1800, train_loss[loss=3.77, NarTop10Accuracy=0.5625, over 7148.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5821, over 5995.50 frames. ], batch size: 22, lr: 1.04e-02 +2024-08-06 08:28:17,180 INFO [trainer.py:765] (7/8) Epoch 8, batch 1900, train_loss[loss=4.125, NarTop10Accuracy=0.497, over 6194.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5818, over 6042.00 frames. ], batch size: 50, lr: 1.04e-02 +2024-08-06 08:28:25,164 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:28:35,290 INFO [trainer.py:811] (7/8) Epoch 8, validation: loss=3.507, NarTop10Accuracy=0.6181, over 1907754.00 frames. +2024-08-06 08:28:35,291 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 08:28:35,796 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.304e+02 1.789e+02 1.988e+02 2.230e+02 4.452e+02, threshold=3.975e+02, percent-clipped=0.5 +2024-08-06 08:28:52,983 INFO [trainer.py:765] (7/8) Epoch 8, batch 2000, train_loss[loss=3.802, NarTop10Accuracy=0.5655, over 5559.00 frames. ], tot_loss[loss=3.669, NarTop10Accuracy=0.5828, over 6016.54 frames. ], batch size: 49, lr: 1.04e-02 +2024-08-06 08:29:18,485 INFO [trainer.py:765] (7/8) Epoch 8, batch 2100, train_loss[loss=3.379, NarTop10Accuracy=0.6386, over 4697.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5821, over 6013.11 frames. ], batch size: 5, lr: 1.04e-02 +2024-08-06 08:29:43,790 INFO [trainer.py:765] (7/8) Epoch 8, batch 2200, train_loss[loss=3.913, NarTop10Accuracy=0.5311, over 7178.00 frames. ], tot_loss[loss=3.673, NarTop10Accuracy=0.5818, over 6034.98 frames. ], batch size: 30, lr: 1.03e-02 +2024-08-06 08:30:09,134 INFO [trainer.py:765] (7/8) Epoch 8, batch 2300, train_loss[loss=3.709, NarTop10Accuracy=0.5831, over 5707.00 frames. ], tot_loss[loss=3.683, NarTop10Accuracy=0.5795, over 6073.74 frames. ], batch size: 9, lr: 1.03e-02 +2024-08-06 08:30:33,791 INFO [trainer.py:765] (7/8) Epoch 8, batch 2400, train_loss[loss=3.428, NarTop10Accuracy=0.6314, over 5167.00 frames. ], tot_loss[loss=3.691, NarTop10Accuracy=0.5779, over 5905.10 frames. ], batch size: 7, lr: 1.03e-02 +2024-08-06 08:30:57,139 INFO [trainer.py:765] (7/8) Epoch 8, batch 2500, train_loss[loss=3.347, NarTop10Accuracy=0.6453, over 5049.00 frames. ], tot_loss[loss=3.671, NarTop10Accuracy=0.5816, over 5540.88 frames. ], batch size: 6, lr: 1.03e-02 +2024-08-06 08:31:18,738 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:32:19,098 INFO [trainer.py:765] (7/8) Epoch 9, batch 100, train_loss[loss=3.828, NarTop10Accuracy=0.5486, over 7494.00 frames. ], tot_loss[loss=3.597, NarTop10Accuracy=0.5998, over 2381.48 frames. ], batch size: 30, lr: 9.71e-03 +2024-08-06 08:32:51,461 INFO [trainer.py:765] (7/8) Epoch 9, batch 200, train_loss[loss=3.605, NarTop10Accuracy=0.6046, over 6873.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.6005, over 3868.82 frames. ], batch size: 17, lr: 9.69e-03 +2024-08-06 08:33:27,115 INFO [trainer.py:765] (7/8) Epoch 9, batch 300, train_loss[loss=3.758, NarTop10Accuracy=0.5787, over 6949.00 frames. ], tot_loss[loss=3.587, NarTop10Accuracy=0.6006, over 4664.41 frames. ], batch size: 22, lr: 9.67e-03 +2024-08-06 08:34:00,964 INFO [trainer.py:765] (7/8) Epoch 9, batch 400, train_loss[loss=3.328, NarTop10Accuracy=0.6635, over 5099.00 frames. ], tot_loss[loss=3.581, NarTop10Accuracy=0.6017, over 5110.78 frames. 
], batch size: 7, lr: 9.64e-03 +2024-08-06 08:34:32,879 INFO [trainer.py:765] (7/8) Epoch 9, batch 500, train_loss[loss=3.828, NarTop10Accuracy=0.5495, over 6271.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6042, over 5400.89 frames. ], batch size: 11, lr: 9.62e-03 +2024-08-06 08:35:07,497 INFO [trainer.py:765] (7/8) Epoch 9, batch 600, train_loss[loss=3.354, NarTop10Accuracy=0.6454, over 5869.00 frames. ], tot_loss[loss=3.583, NarTop10Accuracy=0.6009, over 5671.84 frames. ], batch size: 9, lr: 9.60e-03 +2024-08-06 08:35:42,824 INFO [trainer.py:765] (7/8) Epoch 9, batch 700, train_loss[loss=3.923, NarTop10Accuracy=0.537, over 5134.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5982, over 5747.71 frames. ], batch size: 6, lr: 9.58e-03 +2024-08-06 08:36:14,821 INFO [trainer.py:765] (7/8) Epoch 9, batch 800, train_loss[loss=3.277, NarTop10Accuracy=0.6586, over 5073.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5948, over 5797.71 frames. ], batch size: 6, lr: 9.56e-03 +2024-08-06 08:36:46,454 INFO [trainer.py:765] (7/8) Epoch 9, batch 900, train_loss[loss=3.432, NarTop10Accuracy=0.6388, over 6238.00 frames. ], tot_loss[loss=3.62, NarTop10Accuracy=0.5928, over 5833.13 frames. ], batch size: 13, lr: 9.54e-03 +2024-08-06 08:37:26,564 INFO [trainer.py:765] (7/8) Epoch 9, batch 1000, train_loss[loss=3.335, NarTop10Accuracy=0.637, over 6192.00 frames. ], tot_loss[loss=3.621, NarTop10Accuracy=0.5922, over 5924.33 frames. ], batch size: 13, lr: 9.52e-03 +2024-08-06 08:37:59,420 INFO [trainer.py:765] (7/8) Epoch 9, batch 1100, train_loss[loss=3.672, NarTop10Accuracy=0.576, over 6879.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.5882, over 5959.16 frames. ], batch size: 17, lr: 9.50e-03 +2024-08-06 08:38:31,995 INFO [trainer.py:765] (7/8) Epoch 9, batch 1200, train_loss[loss=3.546, NarTop10Accuracy=0.5974, over 7402.00 frames. ], tot_loss[loss=3.648, NarTop10Accuracy=0.5866, over 5964.27 frames. ], batch size: 30, lr: 9.48e-03 +2024-08-06 08:39:11,840 INFO [trainer.py:765] (7/8) Epoch 9, batch 1300, train_loss[loss=3.835, NarTop10Accuracy=0.5559, over 4244.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5885, over 6027.14 frames. ], batch size: 5, lr: 9.46e-03 +2024-08-06 08:39:27,116 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:39:38,196 INFO [trainer.py:811] (7/8) Epoch 9, validation: loss=3.495, NarTop10Accuracy=0.6214, over 1907754.00 frames. +2024-08-06 08:39:38,197 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 08:39:38,758 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 1.781e+02 1.970e+02 2.189e+02 6.315e+02, threshold=3.940e+02, percent-clipped=0.6 +2024-08-06 08:39:52,278 INFO [trainer.py:765] (7/8) Epoch 9, batch 1400, train_loss[loss=3.429, NarTop10Accuracy=0.6223, over 6057.00 frames. ], tot_loss[loss=3.621, NarTop10Accuracy=0.5917, over 6041.52 frames. ], batch size: 11, lr: 9.43e-03 +2024-08-06 08:40:22,332 INFO [trainer.py:765] (7/8) Epoch 9, batch 1500, train_loss[loss=4.02, NarTop10Accuracy=0.5179, over 6230.00 frames. ], tot_loss[loss=3.629, NarTop10Accuracy=0.5902, over 5986.25 frames. ], batch size: 48, lr: 9.41e-03 +2024-08-06 08:40:50,367 INFO [trainer.py:765] (7/8) Epoch 9, batch 1600, train_loss[loss=3.8, NarTop10Accuracy=0.5563, over 7279.00 frames. ], tot_loss[loss=3.626, NarTop10Accuracy=0.5909, over 5967.63 frames. 
], batch size: 22, lr: 9.39e-03 +2024-08-06 08:41:17,152 INFO [trainer.py:765] (7/8) Epoch 9, batch 1700, train_loss[loss=3.595, NarTop10Accuracy=0.5927, over 6286.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5896, over 5943.19 frames. ], batch size: 13, lr: 9.37e-03 +2024-08-06 08:41:43,812 INFO [trainer.py:765] (7/8) Epoch 9, batch 1800, train_loss[loss=3.938, NarTop10Accuracy=0.5342, over 7124.00 frames. ], tot_loss[loss=3.632, NarTop10Accuracy=0.5897, over 6015.72 frames. ], batch size: 22, lr: 9.35e-03 +2024-08-06 08:42:10,495 INFO [trainer.py:765] (7/8) Epoch 9, batch 1900, train_loss[loss=3.583, NarTop10Accuracy=0.6066, over 6159.00 frames. ], tot_loss[loss=3.631, NarTop10Accuracy=0.5899, over 6040.80 frames. ], batch size: 49, lr: 9.33e-03 +2024-08-06 08:42:36,203 INFO [trainer.py:765] (7/8) Epoch 9, batch 2000, train_loss[loss=4.018, NarTop10Accuracy=0.524, over 6217.00 frames. ], tot_loss[loss=3.641, NarTop10Accuracy=0.5882, over 6012.37 frames. ], batch size: 48, lr: 9.31e-03 +2024-08-06 08:43:01,667 INFO [trainer.py:765] (7/8) Epoch 9, batch 2100, train_loss[loss=3.548, NarTop10Accuracy=0.6073, over 4037.00 frames. ], tot_loss[loss=3.637, NarTop10Accuracy=0.5889, over 5985.91 frames. ], batch size: 4, lr: 9.30e-03 +2024-08-06 08:43:27,178 INFO [trainer.py:765] (7/8) Epoch 9, batch 2200, train_loss[loss=3.57, NarTop10Accuracy=0.614, over 7456.00 frames. ], tot_loss[loss=3.639, NarTop10Accuracy=0.5884, over 6030.27 frames. ], batch size: 31, lr: 9.28e-03 +2024-08-06 08:43:52,671 INFO [trainer.py:765] (7/8) Epoch 9, batch 2300, train_loss[loss=3.715, NarTop10Accuracy=0.5726, over 5634.00 frames. ], tot_loss[loss=3.65, NarTop10Accuracy=0.5861, over 6051.74 frames. ], batch size: 9, lr: 9.26e-03 +2024-08-06 08:44:20,550 INFO [trainer.py:765] (7/8) Epoch 9, batch 2400, train_loss[loss=3.439, NarTop10Accuracy=0.6193, over 5258.00 frames. ], tot_loss[loss=3.649, NarTop10Accuracy=0.5859, over 5861.52 frames. ], batch size: 7, lr: 9.24e-03 +2024-08-06 08:44:44,002 INFO [trainer.py:765] (7/8) Epoch 9, batch 2500, train_loss[loss=3.78, NarTop10Accuracy=0.5584, over 4999.00 frames. ], tot_loss[loss=3.632, NarTop10Accuracy=0.5896, over 5511.03 frames. ], batch size: 6, lr: 9.22e-03 +2024-08-06 08:45:05,395 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 08:46:09,064 INFO [trainer.py:765] (7/8) Epoch 10, batch 100, train_loss[loss=3.328, NarTop10Accuracy=0.6561, over 7310.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6038, over 2382.33 frames. ], batch size: 30, lr: 8.75e-03 +2024-08-06 08:46:44,074 INFO [trainer.py:765] (7/8) Epoch 10, batch 200, train_loss[loss=3.57, NarTop10Accuracy=0.6021, over 6746.00 frames. ], tot_loss[loss=3.561, NarTop10Accuracy=0.605, over 3877.75 frames. ], batch size: 17, lr: 8.73e-03 +2024-08-06 08:47:14,442 INFO [trainer.py:765] (7/8) Epoch 10, batch 300, train_loss[loss=3.5, NarTop10Accuracy=0.6059, over 7227.00 frames. ], tot_loss[loss=3.576, NarTop10Accuracy=0.6018, over 4669.94 frames. ], batch size: 22, lr: 8.72e-03 +2024-08-06 08:47:46,118 INFO [trainer.py:765] (7/8) Epoch 10, batch 400, train_loss[loss=3.606, NarTop10Accuracy=0.5966, over 5160.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6024, over 5125.76 frames. ], batch size: 7, lr: 8.70e-03 +2024-08-06 08:48:22,369 INFO [trainer.py:765] (7/8) Epoch 10, batch 500, train_loss[loss=3.606, NarTop10Accuracy=0.594, over 6054.00 frames. ], tot_loss[loss=3.574, NarTop10Accuracy=0.602, over 5426.30 frames. 
], batch size: 11, lr: 8.68e-03 +2024-08-06 08:48:53,459 INFO [trainer.py:765] (7/8) Epoch 10, batch 600, train_loss[loss=3.412, NarTop10Accuracy=0.6356, over 5834.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5992, over 5700.34 frames. ], batch size: 9, lr: 8.66e-03 +2024-08-06 08:49:26,706 INFO [trainer.py:765] (7/8) Epoch 10, batch 700, train_loss[loss=3.201, NarTop10Accuracy=0.6569, over 5127.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.599, over 5761.63 frames. ], batch size: 6, lr: 8.65e-03 +2024-08-06 08:49:49,163 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 08:50:00,983 INFO [trainer.py:811] (7/8) Epoch 10, validation: loss=3.46, NarTop10Accuracy=0.6279, over 1907754.00 frames. +2024-08-06 08:50:00,984 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 08:50:01,725 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.353e+02 1.818e+02 1.985e+02 2.213e+02 4.843e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 08:50:09,801 INFO [trainer.py:765] (7/8) Epoch 10, batch 800, train_loss[loss=3.395, NarTop10Accuracy=0.6251, over 5029.00 frames. ], tot_loss[loss=3.597, NarTop10Accuracy=0.5976, over 5795.06 frames. ], batch size: 6, lr: 8.63e-03 +2024-08-06 08:50:42,892 INFO [trainer.py:765] (7/8) Epoch 10, batch 900, train_loss[loss=3.451, NarTop10Accuracy=0.635, over 6389.00 frames. ], tot_loss[loss=3.575, NarTop10Accuracy=0.602, over 5841.48 frames. ], batch size: 13, lr: 8.61e-03 +2024-08-06 08:51:18,460 INFO [trainer.py:765] (7/8) Epoch 10, batch 1000, train_loss[loss=3.801, NarTop10Accuracy=0.559, over 6331.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5989, over 5926.33 frames. ], batch size: 13, lr: 8.59e-03 +2024-08-06 08:51:57,362 INFO [trainer.py:765] (7/8) Epoch 10, batch 1100, train_loss[loss=3.305, NarTop10Accuracy=0.6591, over 6510.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5969, over 5972.11 frames. ], batch size: 16, lr: 8.58e-03 +2024-08-06 08:52:32,048 INFO [trainer.py:765] (7/8) Epoch 10, batch 1200, train_loss[loss=3.587, NarTop10Accuracy=0.5973, over 7474.00 frames. ], tot_loss[loss=3.596, NarTop10Accuracy=0.597, over 5958.85 frames. ], batch size: 31, lr: 8.56e-03 +2024-08-06 08:53:06,607 INFO [trainer.py:765] (7/8) Epoch 10, batch 1300, train_loss[loss=3.455, NarTop10Accuracy=0.6274, over 5174.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5987, over 6014.40 frames. ], batch size: 6, lr: 8.54e-03 +2024-08-06 08:53:46,880 INFO [trainer.py:765] (7/8) Epoch 10, batch 1400, train_loss[loss=3.613, NarTop10Accuracy=0.5961, over 6041.00 frames. ], tot_loss[loss=3.603, NarTop10Accuracy=0.5961, over 6036.82 frames. ], batch size: 11, lr: 8.53e-03 +2024-08-06 08:54:17,501 INFO [trainer.py:765] (7/8) Epoch 10, batch 1500, train_loss[loss=3.57, NarTop10Accuracy=0.6029, over 6210.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5989, over 5963.49 frames. ], batch size: 49, lr: 8.51e-03 +2024-08-06 08:54:45,526 INFO [trainer.py:765] (7/8) Epoch 10, batch 1600, train_loss[loss=3.495, NarTop10Accuracy=0.62, over 7161.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5989, over 5948.17 frames. ], batch size: 22, lr: 8.49e-03 +2024-08-06 08:55:12,300 INFO [trainer.py:765] (7/8) Epoch 10, batch 1700, train_loss[loss=3.492, NarTop10Accuracy=0.6159, over 6744.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5982, over 5925.23 frames. 
], batch size: 14, lr: 8.48e-03 +2024-08-06 08:55:41,989 INFO [trainer.py:765] (7/8) Epoch 10, batch 1800, train_loss[loss=3.637, NarTop10Accuracy=0.5842, over 7072.00 frames. ], tot_loss[loss=3.595, NarTop10Accuracy=0.5975, over 6002.72 frames. ], batch size: 22, lr: 8.46e-03 +2024-08-06 08:56:08,572 INFO [trainer.py:765] (7/8) Epoch 10, batch 1900, train_loss[loss=4.087, NarTop10Accuracy=0.4995, over 6198.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.5965, over 6049.25 frames. ], batch size: 49, lr: 8.45e-03 +2024-08-06 08:56:34,287 INFO [trainer.py:765] (7/8) Epoch 10, batch 2000, train_loss[loss=3.714, NarTop10Accuracy=0.5776, over 5429.00 frames. ], tot_loss[loss=3.605, NarTop10Accuracy=0.5956, over 6018.33 frames. ], batch size: 50, lr: 8.43e-03 +2024-08-06 08:56:59,751 INFO [trainer.py:765] (7/8) Epoch 10, batch 2100, train_loss[loss=3.417, NarTop10Accuracy=0.6314, over 3952.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5963, over 6003.24 frames. ], batch size: 4, lr: 8.41e-03 +2024-08-06 08:57:25,280 INFO [trainer.py:765] (7/8) Epoch 10, batch 2200, train_loss[loss=3.635, NarTop10Accuracy=0.5857, over 7054.00 frames. ], tot_loss[loss=3.599, NarTop10Accuracy=0.5969, over 6048.72 frames. ], batch size: 30, lr: 8.40e-03 +2024-08-06 08:57:50,683 INFO [trainer.py:765] (7/8) Epoch 10, batch 2300, train_loss[loss=3.345, NarTop10Accuracy=0.6538, over 5885.00 frames. ], tot_loss[loss=3.61, NarTop10Accuracy=0.5947, over 6064.05 frames. ], batch size: 9, lr: 8.38e-03 +2024-08-06 08:58:15,345 INFO [trainer.py:765] (7/8) Epoch 10, batch 2400, train_loss[loss=3.474, NarTop10Accuracy=0.6192, over 5032.00 frames. ], tot_loss[loss=3.612, NarTop10Accuracy=0.5943, over 5885.61 frames. ], batch size: 7, lr: 8.37e-03 +2024-08-06 08:58:38,809 INFO [trainer.py:765] (7/8) Epoch 10, batch 2500, train_loss[loss=3.584, NarTop10Accuracy=0.6007, over 5024.00 frames. ], tot_loss[loss=3.601, NarTop10Accuracy=0.5957, over 5551.21 frames. ], batch size: 6, lr: 8.35e-03 +2024-08-06 08:59:00,363 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:00:03,680 INFO [trainer.py:765] (7/8) Epoch 11, batch 100, train_loss[loss=3.403, NarTop10Accuracy=0.646, over 7191.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6114, over 2375.61 frames. ], batch size: 30, lr: 7.96e-03 +2024-08-06 09:00:30,915 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:00:41,217 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=3.404, NarTop10Accuracy=0.6396, over 1907754.00 frames. +2024-08-06 09:00:41,218 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:00:41,774 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.800e+02 1.980e+02 2.200e+02 4.491e+02, threshold=3.959e+02, percent-clipped=0.2 +2024-08-06 09:00:46,859 INFO [trainer.py:765] (7/8) Epoch 11, batch 200, train_loss[loss=3.732, NarTop10Accuracy=0.5621, over 6791.00 frames. ], tot_loss[loss=3.528, NarTop10Accuracy=0.6114, over 3855.62 frames. ], batch size: 17, lr: 7.94e-03 +2024-08-06 09:01:17,853 INFO [trainer.py:765] (7/8) Epoch 11, batch 300, train_loss[loss=3.453, NarTop10Accuracy=0.631, over 7059.00 frames. ], tot_loss[loss=3.545, NarTop10Accuracy=0.6084, over 4662.79 frames. ], batch size: 22, lr: 7.93e-03 +2024-08-06 09:01:50,534 INFO [trainer.py:765] (7/8) Epoch 11, batch 400, train_loss[loss=3.276, NarTop10Accuracy=0.6714, over 5102.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6108, over 5106.69 frames. 
], batch size: 7, lr: 7.91e-03 +2024-08-06 09:02:21,239 INFO [trainer.py:765] (7/8) Epoch 11, batch 500, train_loss[loss=3.353, NarTop10Accuracy=0.6551, over 6112.00 frames. ], tot_loss[loss=3.531, NarTop10Accuracy=0.6116, over 5392.07 frames. ], batch size: 11, lr: 7.90e-03 +2024-08-06 09:03:01,742 INFO [trainer.py:765] (7/8) Epoch 11, batch 600, train_loss[loss=3.694, NarTop10Accuracy=0.588, over 5850.00 frames. ], tot_loss[loss=3.537, NarTop10Accuracy=0.6102, over 5660.84 frames. ], batch size: 9, lr: 7.88e-03 +2024-08-06 09:03:38,237 INFO [trainer.py:765] (7/8) Epoch 11, batch 700, train_loss[loss=3.607, NarTop10Accuracy=0.5992, over 5094.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6082, over 5746.94 frames. ], batch size: 6, lr: 7.87e-03 +2024-08-06 09:04:10,756 INFO [trainer.py:765] (7/8) Epoch 11, batch 800, train_loss[loss=3.675, NarTop10Accuracy=0.5875, over 5059.00 frames. ], tot_loss[loss=3.558, NarTop10Accuracy=0.6058, over 5800.08 frames. ], batch size: 6, lr: 7.86e-03 +2024-08-06 09:04:50,084 INFO [trainer.py:765] (7/8) Epoch 11, batch 900, train_loss[loss=3.431, NarTop10Accuracy=0.6395, over 6278.00 frames. ], tot_loss[loss=3.549, NarTop10Accuracy=0.6068, over 5804.54 frames. ], batch size: 13, lr: 7.84e-03 +2024-08-06 09:05:27,014 INFO [trainer.py:765] (7/8) Epoch 11, batch 1000, train_loss[loss=3.339, NarTop10Accuracy=0.6439, over 6201.00 frames. ], tot_loss[loss=3.555, NarTop10Accuracy=0.6057, over 5916.87 frames. ], batch size: 13, lr: 7.83e-03 +2024-08-06 09:06:00,351 INFO [trainer.py:765] (7/8) Epoch 11, batch 1100, train_loss[loss=3.477, NarTop10Accuracy=0.6212, over 6768.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6028, over 5962.11 frames. ], batch size: 17, lr: 7.81e-03 +2024-08-06 09:06:40,946 INFO [trainer.py:765] (7/8) Epoch 11, batch 1200, train_loss[loss=3.614, NarTop10Accuracy=0.5955, over 7046.00 frames. ], tot_loss[loss=3.564, NarTop10Accuracy=0.6034, over 5954.95 frames. ], batch size: 30, lr: 7.80e-03 +2024-08-06 09:07:15,494 INFO [trainer.py:765] (7/8) Epoch 11, batch 1300, train_loss[loss=3.227, NarTop10Accuracy=0.6746, over 5093.00 frames. ], tot_loss[loss=3.571, NarTop10Accuracy=0.6019, over 6018.83 frames. ], batch size: 6, lr: 7.79e-03 +2024-08-06 09:07:47,629 INFO [trainer.py:765] (7/8) Epoch 11, batch 1400, train_loss[loss=3.647, NarTop10Accuracy=0.5753, over 6233.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5982, over 6045.97 frames. ], batch size: 11, lr: 7.77e-03 +2024-08-06 09:08:18,987 INFO [trainer.py:765] (7/8) Epoch 11, batch 1500, train_loss[loss=3.528, NarTop10Accuracy=0.6177, over 6150.00 frames. ], tot_loss[loss=3.589, NarTop10Accuracy=0.5986, over 5980.67 frames. ], batch size: 53, lr: 7.76e-03 +2024-08-06 09:08:47,149 INFO [trainer.py:765] (7/8) Epoch 11, batch 1600, train_loss[loss=3.359, NarTop10Accuracy=0.6491, over 6987.00 frames. ], tot_loss[loss=3.585, NarTop10Accuracy=0.5999, over 5953.14 frames. ], batch size: 22, lr: 7.74e-03 +2024-08-06 09:09:13,951 INFO [trainer.py:765] (7/8) Epoch 11, batch 1700, train_loss[loss=3.394, NarTop10Accuracy=0.6353, over 6301.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6008, over 5949.88 frames. ], batch size: 13, lr: 7.73e-03 +2024-08-06 09:09:40,733 INFO [trainer.py:765] (7/8) Epoch 11, batch 1800, train_loss[loss=3.678, NarTop10Accuracy=0.5803, over 7084.00 frames. ], tot_loss[loss=3.588, NarTop10Accuracy=0.5993, over 6007.73 frames. 
], batch size: 22, lr: 7.72e-03 +2024-08-06 09:10:07,342 INFO [trainer.py:765] (7/8) Epoch 11, batch 1900, train_loss[loss=3.792, NarTop10Accuracy=0.5654, over 6108.00 frames. ], tot_loss[loss=3.594, NarTop10Accuracy=0.5976, over 6050.75 frames. ], batch size: 48, lr: 7.70e-03 +2024-08-06 09:10:33,039 INFO [trainer.py:765] (7/8) Epoch 11, batch 2000, train_loss[loss=3.633, NarTop10Accuracy=0.5949, over 5745.00 frames. ], tot_loss[loss=3.591, NarTop10Accuracy=0.5982, over 6024.60 frames. ], batch size: 49, lr: 7.69e-03 +2024-08-06 09:10:58,442 INFO [trainer.py:765] (7/8) Epoch 11, batch 2100, train_loss[loss=3.292, NarTop10Accuracy=0.6593, over 3950.00 frames. ], tot_loss[loss=3.584, NarTop10Accuracy=0.6001, over 5994.34 frames. ], batch size: 4, lr: 7.68e-03 +2024-08-06 09:11:20,709 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:11:31,457 INFO [trainer.py:811] (7/8) Epoch 11, validation: loss=3.372, NarTop10Accuracy=0.6462, over 1907754.00 frames. +2024-08-06 09:11:31,458 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:11:31,930 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.800e+02 1.966e+02 2.160e+02 4.000e+02, threshold=3.933e+02, percent-clipped=0.1 +2024-08-06 09:11:34,519 INFO [trainer.py:765] (7/8) Epoch 11, batch 2200, train_loss[loss=3.444, NarTop10Accuracy=0.6223, over 7420.00 frames. ], tot_loss[loss=3.582, NarTop10Accuracy=0.6003, over 6027.65 frames. ], batch size: 32, lr: 7.66e-03 +2024-08-06 09:11:59,939 INFO [trainer.py:765] (7/8) Epoch 11, batch 2300, train_loss[loss=3.573, NarTop10Accuracy=0.6041, over 5842.00 frames. ], tot_loss[loss=3.59, NarTop10Accuracy=0.5989, over 6058.60 frames. ], batch size: 9, lr: 7.65e-03 +2024-08-06 09:12:24,696 INFO [trainer.py:765] (7/8) Epoch 11, batch 2400, train_loss[loss=3.923, NarTop10Accuracy=0.5425, over 5085.00 frames. ], tot_loss[loss=3.6, NarTop10Accuracy=0.597, over 5865.02 frames. ], batch size: 7, lr: 7.64e-03 +2024-08-06 09:12:47,879 INFO [trainer.py:765] (7/8) Epoch 11, batch 2500, train_loss[loss=3.642, NarTop10Accuracy=0.5889, over 4994.00 frames. ], tot_loss[loss=3.568, NarTop10Accuracy=0.6023, over 5531.51 frames. ], batch size: 6, lr: 7.62e-03 +2024-08-06 09:13:09,290 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:14:12,278 INFO [trainer.py:765] (7/8) Epoch 12, batch 100, train_loss[loss=3.332, NarTop10Accuracy=0.6517, over 7543.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6111, over 2369.19 frames. ], batch size: 31, lr: 7.29e-03 +2024-08-06 09:14:48,095 INFO [trainer.py:765] (7/8) Epoch 12, batch 200, train_loss[loss=3.451, NarTop10Accuracy=0.624, over 6788.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.617, over 3853.98 frames. ], batch size: 17, lr: 7.28e-03 +2024-08-06 09:15:20,021 INFO [trainer.py:765] (7/8) Epoch 12, batch 300, train_loss[loss=3.384, NarTop10Accuracy=0.6452, over 7173.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6176, over 4648.88 frames. ], batch size: 22, lr: 7.27e-03 +2024-08-06 09:15:52,633 INFO [trainer.py:765] (7/8) Epoch 12, batch 400, train_loss[loss=3.695, NarTop10Accuracy=0.5863, over 5203.00 frames. ], tot_loss[loss=3.513, NarTop10Accuracy=0.6155, over 5118.35 frames. ], batch size: 7, lr: 7.25e-03 +2024-08-06 09:16:26,433 INFO [trainer.py:765] (7/8) Epoch 12, batch 500, train_loss[loss=3.342, NarTop10Accuracy=0.6459, over 6284.00 frames. ], tot_loss[loss=3.517, NarTop10Accuracy=0.6147, over 5400.57 frames. 
], batch size: 11, lr: 7.24e-03 +2024-08-06 09:16:59,238 INFO [trainer.py:765] (7/8) Epoch 12, batch 600, train_loss[loss=3.486, NarTop10Accuracy=0.6271, over 5852.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6102, over 5676.72 frames. ], batch size: 9, lr: 7.23e-03 +2024-08-06 09:17:36,318 INFO [trainer.py:765] (7/8) Epoch 12, batch 700, train_loss[loss=3.23, NarTop10Accuracy=0.6799, over 4965.00 frames. ], tot_loss[loss=3.523, NarTop10Accuracy=0.613, over 5755.97 frames. ], batch size: 6, lr: 7.22e-03 +2024-08-06 09:18:07,752 INFO [trainer.py:765] (7/8) Epoch 12, batch 800, train_loss[loss=3.733, NarTop10Accuracy=0.5769, over 5151.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6116, over 5798.55 frames. ], batch size: 6, lr: 7.21e-03 +2024-08-06 09:18:43,778 INFO [trainer.py:765] (7/8) Epoch 12, batch 900, train_loss[loss=3.771, NarTop10Accuracy=0.5642, over 6314.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6083, over 5825.32 frames. ], batch size: 13, lr: 7.19e-03 +2024-08-06 09:19:17,689 INFO [trainer.py:765] (7/8) Epoch 12, batch 1000, train_loss[loss=3.478, NarTop10Accuracy=0.6166, over 6102.00 frames. ], tot_loss[loss=3.542, NarTop10Accuracy=0.6087, over 5935.54 frames. ], batch size: 13, lr: 7.18e-03 +2024-08-06 09:19:52,426 INFO [trainer.py:765] (7/8) Epoch 12, batch 1100, train_loss[loss=3.735, NarTop10Accuracy=0.5582, over 6843.00 frames. ], tot_loss[loss=3.55, NarTop10Accuracy=0.6072, over 5966.80 frames. ], batch size: 17, lr: 7.17e-03 +2024-08-06 09:20:29,442 INFO [trainer.py:765] (7/8) Epoch 12, batch 1200, train_loss[loss=3.398, NarTop10Accuracy=0.638, over 7052.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.606, over 5953.52 frames. ], batch size: 30, lr: 7.16e-03 +2024-08-06 09:21:02,826 INFO [trainer.py:765] (7/8) Epoch 12, batch 1300, train_loss[loss=3.473, NarTop10Accuracy=0.6101, over 4974.00 frames. ], tot_loss[loss=3.548, NarTop10Accuracy=0.6069, over 6039.35 frames. ], batch size: 6, lr: 7.15e-03 +2024-08-06 09:21:36,981 INFO [trainer.py:765] (7/8) Epoch 12, batch 1400, train_loss[loss=3.553, NarTop10Accuracy=0.603, over 6183.00 frames. ], tot_loss[loss=3.554, NarTop10Accuracy=0.6058, over 6043.80 frames. ], batch size: 11, lr: 7.13e-03 +2024-08-06 09:22:09,919 INFO [trainer.py:765] (7/8) Epoch 12, batch 1500, train_loss[loss=3.568, NarTop10Accuracy=0.6094, over 6290.00 frames. ], tot_loss[loss=3.557, NarTop10Accuracy=0.6052, over 5984.33 frames. ], batch size: 49, lr: 7.12e-03 +2024-08-06 09:22:38,026 INFO [trainer.py:765] (7/8) Epoch 12, batch 1600, train_loss[loss=3.514, NarTop10Accuracy=0.6118, over 7168.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6043, over 5967.19 frames. ], batch size: 22, lr: 7.11e-03 +2024-08-06 09:22:39,858 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:22:49,889 INFO [trainer.py:811] (7/8) Epoch 12, validation: loss=3.364, NarTop10Accuracy=0.6481, over 1907754.00 frames. +2024-08-06 09:22:49,889 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:22:50,413 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.796e+02 1.978e+02 2.176e+02 4.603e+02, threshold=3.957e+02, percent-clipped=0.2 +2024-08-06 09:23:14,785 INFO [trainer.py:765] (7/8) Epoch 12, batch 1700, train_loss[loss=3.181, NarTop10Accuracy=0.6817, over 6266.00 frames. ], tot_loss[loss=3.563, NarTop10Accuracy=0.6044, over 5932.39 frames. 
], batch size: 13, lr: 7.10e-03 +2024-08-06 09:23:41,386 INFO [trainer.py:765] (7/8) Epoch 12, batch 1800, train_loss[loss=3.35, NarTop10Accuracy=0.6504, over 7277.00 frames. ], tot_loss[loss=3.552, NarTop10Accuracy=0.6065, over 6002.82 frames. ], batch size: 22, lr: 7.09e-03 +2024-08-06 09:24:07,957 INFO [trainer.py:765] (7/8) Epoch 12, batch 1900, train_loss[loss=3.594, NarTop10Accuracy=0.5981, over 5923.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6047, over 6041.83 frames. ], batch size: 48, lr: 7.08e-03 +2024-08-06 09:24:33,618 INFO [trainer.py:765] (7/8) Epoch 12, batch 2000, train_loss[loss=3.516, NarTop10Accuracy=0.617, over 5609.00 frames. ], tot_loss[loss=3.56, NarTop10Accuracy=0.6049, over 6003.38 frames. ], batch size: 49, lr: 7.07e-03 +2024-08-06 09:24:59,038 INFO [trainer.py:765] (7/8) Epoch 12, batch 2100, train_loss[loss=3.622, NarTop10Accuracy=0.5912, over 3926.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6049, over 5991.47 frames. ], batch size: 4, lr: 7.05e-03 +2024-08-06 09:25:24,509 INFO [trainer.py:765] (7/8) Epoch 12, batch 2200, train_loss[loss=3.299, NarTop10Accuracy=0.6526, over 6924.00 frames. ], tot_loss[loss=3.559, NarTop10Accuracy=0.6052, over 6022.63 frames. ], batch size: 30, lr: 7.04e-03 +2024-08-06 09:25:49,926 INFO [trainer.py:765] (7/8) Epoch 12, batch 2300, train_loss[loss=3.768, NarTop10Accuracy=0.5635, over 5865.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6029, over 6070.95 frames. ], batch size: 9, lr: 7.03e-03 +2024-08-06 09:26:14,656 INFO [trainer.py:765] (7/8) Epoch 12, batch 2400, train_loss[loss=3.254, NarTop10Accuracy=0.6679, over 5236.00 frames. ], tot_loss[loss=3.57, NarTop10Accuracy=0.6027, over 5897.15 frames. ], batch size: 7, lr: 7.02e-03 +2024-08-06 09:26:38,154 INFO [trainer.py:765] (7/8) Epoch 12, batch 2500, train_loss[loss=3.494, NarTop10Accuracy=0.619, over 4954.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6061, over 5551.56 frames. ], batch size: 6, lr: 7.01e-03 +2024-08-06 09:26:59,460 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:28:03,611 INFO [trainer.py:765] (7/8) Epoch 13, batch 100, train_loss[loss=3.627, NarTop10Accuracy=0.5992, over 6957.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6127, over 2368.46 frames. ], batch size: 30, lr: 6.72e-03 +2024-08-06 09:28:36,905 INFO [trainer.py:765] (7/8) Epoch 13, batch 200, train_loss[loss=3.431, NarTop10Accuracy=0.6278, over 6791.00 frames. ], tot_loss[loss=3.512, NarTop10Accuracy=0.6163, over 3858.08 frames. ], batch size: 17, lr: 6.71e-03 +2024-08-06 09:29:07,170 INFO [trainer.py:765] (7/8) Epoch 13, batch 300, train_loss[loss=3.238, NarTop10Accuracy=0.6609, over 7063.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6192, over 4655.58 frames. ], batch size: 22, lr: 6.70e-03 +2024-08-06 09:29:41,038 INFO [trainer.py:765] (7/8) Epoch 13, batch 400, train_loss[loss=3.399, NarTop10Accuracy=0.6519, over 5195.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6218, over 5106.17 frames. ], batch size: 7, lr: 6.69e-03 +2024-08-06 09:30:13,730 INFO [trainer.py:765] (7/8) Epoch 13, batch 500, train_loss[loss=3.684, NarTop10Accuracy=0.5827, over 6185.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6191, over 5392.46 frames. ], batch size: 11, lr: 6.68e-03 +2024-08-06 09:30:47,198 INFO [trainer.py:765] (7/8) Epoch 13, batch 600, train_loss[loss=3.416, NarTop10Accuracy=0.6352, over 5798.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6183, over 5652.30 frames. 
], batch size: 9, lr: 6.67e-03 +2024-08-06 09:31:23,821 INFO [trainer.py:765] (7/8) Epoch 13, batch 700, train_loss[loss=3.373, NarTop10Accuracy=0.6302, over 5035.00 frames. ], tot_loss[loss=3.499, NarTop10Accuracy=0.6175, over 5724.51 frames. ], batch size: 6, lr: 6.66e-03 +2024-08-06 09:31:58,208 INFO [trainer.py:765] (7/8) Epoch 13, batch 800, train_loss[loss=3.211, NarTop10Accuracy=0.6748, over 5003.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6172, over 5781.50 frames. ], batch size: 6, lr: 6.65e-03 +2024-08-06 09:32:29,193 INFO [trainer.py:765] (7/8) Epoch 13, batch 900, train_loss[loss=3.374, NarTop10Accuracy=0.6487, over 6751.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6183, over 5811.34 frames. ], batch size: 14, lr: 6.64e-03 +2024-08-06 09:33:03,133 INFO [trainer.py:765] (7/8) Epoch 13, batch 1000, train_loss[loss=3.533, NarTop10Accuracy=0.613, over 6263.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.616, over 5909.25 frames. ], batch size: 13, lr: 6.63e-03 +2024-08-06 09:33:14,218 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:33:24,525 INFO [trainer.py:811] (7/8) Epoch 13, validation: loss=3.389, NarTop10Accuracy=0.6428, over 1907754.00 frames. +2024-08-06 09:33:24,526 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:33:25,132 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.794e+02 1.964e+02 2.145e+02 3.608e+02, threshold=3.929e+02, percent-clipped=0.0 +2024-08-06 09:33:51,714 INFO [trainer.py:765] (7/8) Epoch 13, batch 1100, train_loss[loss=3.784, NarTop10Accuracy=0.5607, over 6903.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6121, over 5955.65 frames. ], batch size: 17, lr: 6.62e-03 +2024-08-06 09:34:25,486 INFO [trainer.py:765] (7/8) Epoch 13, batch 1200, train_loss[loss=3.515, NarTop10Accuracy=0.6226, over 7153.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6137, over 5952.12 frames. ], batch size: 30, lr: 6.61e-03 +2024-08-06 09:35:05,085 INFO [trainer.py:765] (7/8) Epoch 13, batch 1300, train_loss[loss=3.607, NarTop10Accuracy=0.5942, over 5135.00 frames. ], tot_loss[loss=3.519, NarTop10Accuracy=0.6133, over 6019.47 frames. ], batch size: 6, lr: 6.60e-03 +2024-08-06 09:35:36,405 INFO [trainer.py:765] (7/8) Epoch 13, batch 1400, train_loss[loss=3.6, NarTop10Accuracy=0.5963, over 6218.00 frames. ], tot_loss[loss=3.529, NarTop10Accuracy=0.6116, over 6035.84 frames. ], batch size: 11, lr: 6.59e-03 +2024-08-06 09:36:07,320 INFO [trainer.py:765] (7/8) Epoch 13, batch 1500, train_loss[loss=3.659, NarTop10Accuracy=0.5854, over 6381.00 frames. ], tot_loss[loss=3.533, NarTop10Accuracy=0.6105, over 5989.01 frames. ], batch size: 48, lr: 6.58e-03 +2024-08-06 09:36:35,389 INFO [trainer.py:765] (7/8) Epoch 13, batch 1600, train_loss[loss=3.757, NarTop10Accuracy=0.5719, over 7046.00 frames. ], tot_loss[loss=3.536, NarTop10Accuracy=0.6095, over 5963.68 frames. ], batch size: 22, lr: 6.57e-03 +2024-08-06 09:37:02,144 INFO [trainer.py:765] (7/8) Epoch 13, batch 1700, train_loss[loss=3.467, NarTop10Accuracy=0.6307, over 6628.00 frames. ], tot_loss[loss=3.544, NarTop10Accuracy=0.6084, over 5945.25 frames. ], batch size: 14, lr: 6.56e-03 +2024-08-06 09:37:28,778 INFO [trainer.py:765] (7/8) Epoch 13, batch 1800, train_loss[loss=3.378, NarTop10Accuracy=0.6425, over 7067.00 frames. ], tot_loss[loss=3.532, NarTop10Accuracy=0.611, over 6010.85 frames. 
], batch size: 22, lr: 6.55e-03 +2024-08-06 09:37:55,387 INFO [trainer.py:765] (7/8) Epoch 13, batch 1900, train_loss[loss=3.57, NarTop10Accuracy=0.6103, over 6358.00 frames. ], tot_loss[loss=3.541, NarTop10Accuracy=0.6093, over 6043.73 frames. ], batch size: 49, lr: 6.54e-03 +2024-08-06 09:38:21,122 INFO [trainer.py:765] (7/8) Epoch 13, batch 2000, train_loss[loss=3.59, NarTop10Accuracy=0.6114, over 5869.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6101, over 6016.56 frames. ], batch size: 49, lr: 6.53e-03 +2024-08-06 09:38:49,691 INFO [trainer.py:765] (7/8) Epoch 13, batch 2100, train_loss[loss=3.317, NarTop10Accuracy=0.6717, over 3973.00 frames. ], tot_loss[loss=3.535, NarTop10Accuracy=0.6107, over 5989.10 frames. ], batch size: 4, lr: 6.52e-03 +2024-08-06 09:39:15,107 INFO [trainer.py:765] (7/8) Epoch 13, batch 2200, train_loss[loss=3.56, NarTop10Accuracy=0.5969, over 6989.00 frames. ], tot_loss[loss=3.538, NarTop10Accuracy=0.6097, over 6028.18 frames. ], batch size: 30, lr: 6.51e-03 +2024-08-06 09:39:40,618 INFO [trainer.py:765] (7/8) Epoch 13, batch 2300, train_loss[loss=3.361, NarTop10Accuracy=0.651, over 5813.00 frames. ], tot_loss[loss=3.547, NarTop10Accuracy=0.6074, over 6066.44 frames. ], batch size: 9, lr: 6.50e-03 +2024-08-06 09:40:05,343 INFO [trainer.py:765] (7/8) Epoch 13, batch 2400, train_loss[loss=3.321, NarTop10Accuracy=0.662, over 5173.00 frames. ], tot_loss[loss=3.551, NarTop10Accuracy=0.6068, over 5898.03 frames. ], batch size: 7, lr: 6.49e-03 +2024-08-06 09:40:28,768 INFO [trainer.py:765] (7/8) Epoch 13, batch 2500, train_loss[loss=3.281, NarTop10Accuracy=0.6519, over 4990.00 frames. ], tot_loss[loss=3.53, NarTop10Accuracy=0.6108, over 5568.45 frames. ], batch size: 6, lr: 6.48e-03 +2024-08-06 09:40:49,981 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:41:48,981 INFO [trainer.py:765] (7/8) Epoch 14, batch 100, train_loss[loss=3.325, NarTop10Accuracy=0.6554, over 7062.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6194, over 2386.42 frames. ], batch size: 30, lr: 6.24e-03 +2024-08-06 09:42:22,937 INFO [trainer.py:765] (7/8) Epoch 14, batch 200, train_loss[loss=3.562, NarTop10Accuracy=0.5967, over 7001.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6251, over 3877.43 frames. ], batch size: 17, lr: 6.23e-03 +2024-08-06 09:42:58,415 INFO [trainer.py:765] (7/8) Epoch 14, batch 300, train_loss[loss=3.756, NarTop10Accuracy=0.5661, over 7180.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6188, over 4676.02 frames. ], batch size: 22, lr: 6.22e-03 +2024-08-06 09:43:30,440 INFO [trainer.py:765] (7/8) Epoch 14, batch 400, train_loss[loss=3.351, NarTop10Accuracy=0.6466, over 5169.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6191, over 5115.44 frames. ], batch size: 7, lr: 6.21e-03 +2024-08-06 09:43:42,487 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:43:53,651 INFO [trainer.py:811] (7/8) Epoch 14, validation: loss=3.321, NarTop10Accuracy=0.6566, over 1907754.00 frames. +2024-08-06 09:43:53,651 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:43:54,211 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.805e+02 1.968e+02 2.158e+02 4.264e+02, threshold=3.936e+02, percent-clipped=0.2 +2024-08-06 09:44:11,700 INFO [trainer.py:765] (7/8) Epoch 14, batch 500, train_loss[loss=3.507, NarTop10Accuracy=0.6242, over 6147.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6208, over 5398.73 frames. 
], batch size: 11, lr: 6.20e-03 +2024-08-06 09:44:47,166 INFO [trainer.py:765] (7/8) Epoch 14, batch 600, train_loss[loss=3.656, NarTop10Accuracy=0.5741, over 5918.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6228, over 5670.58 frames. ], batch size: 9, lr: 6.19e-03 +2024-08-06 09:45:19,803 INFO [trainer.py:765] (7/8) Epoch 14, batch 700, train_loss[loss=3.886, NarTop10Accuracy=0.5405, over 5115.00 frames. ], tot_loss[loss=3.474, NarTop10Accuracy=0.6231, over 5745.80 frames. ], batch size: 6, lr: 6.18e-03 +2024-08-06 09:45:58,435 INFO [trainer.py:765] (7/8) Epoch 14, batch 800, train_loss[loss=3.157, NarTop10Accuracy=0.6748, over 4175.00 frames. ], tot_loss[loss=3.491, NarTop10Accuracy=0.6195, over 5802.07 frames. ], batch size: 5, lr: 6.17e-03 +2024-08-06 09:46:35,420 INFO [trainer.py:765] (7/8) Epoch 14, batch 900, train_loss[loss=3.745, NarTop10Accuracy=0.5694, over 6272.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6201, over 5832.74 frames. ], batch size: 13, lr: 6.17e-03 +2024-08-06 09:47:08,399 INFO [trainer.py:765] (7/8) Epoch 14, batch 1000, train_loss[loss=3.451, NarTop10Accuracy=0.6276, over 6196.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.6183, over 5931.78 frames. ], batch size: 13, lr: 6.16e-03 +2024-08-06 09:47:47,663 INFO [trainer.py:765] (7/8) Epoch 14, batch 1100, train_loss[loss=3.291, NarTop10Accuracy=0.6561, over 6774.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.617, over 5967.86 frames. ], batch size: 17, lr: 6.15e-03 +2024-08-06 09:48:23,500 INFO [trainer.py:765] (7/8) Epoch 14, batch 1200, train_loss[loss=3.469, NarTop10Accuracy=0.6251, over 7425.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6169, over 5977.85 frames. ], batch size: 31, lr: 6.14e-03 +2024-08-06 09:48:57,972 INFO [trainer.py:765] (7/8) Epoch 14, batch 1300, train_loss[loss=3.447, NarTop10Accuracy=0.6284, over 5039.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6174, over 6020.89 frames. ], batch size: 6, lr: 6.13e-03 +2024-08-06 09:49:30,234 INFO [trainer.py:765] (7/8) Epoch 14, batch 1400, train_loss[loss=3.314, NarTop10Accuracy=0.6489, over 6062.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6138, over 6030.13 frames. ], batch size: 11, lr: 6.12e-03 +2024-08-06 09:50:07,531 INFO [trainer.py:765] (7/8) Epoch 14, batch 1500, train_loss[loss=3.696, NarTop10Accuracy=0.5832, over 5920.00 frames. ], tot_loss[loss=3.514, NarTop10Accuracy=0.614, over 5965.59 frames. ], batch size: 49, lr: 6.11e-03 +2024-08-06 09:50:35,637 INFO [trainer.py:765] (7/8) Epoch 14, batch 1600, train_loss[loss=3.547, NarTop10Accuracy=0.6084, over 7012.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6157, over 5950.11 frames. ], batch size: 22, lr: 6.10e-03 +2024-08-06 09:51:02,378 INFO [trainer.py:765] (7/8) Epoch 14, batch 1700, train_loss[loss=3.343, NarTop10Accuracy=0.6557, over 6397.00 frames. ], tot_loss[loss=3.506, NarTop10Accuracy=0.616, over 5919.00 frames. ], batch size: 13, lr: 6.10e-03 +2024-08-06 09:51:28,994 INFO [trainer.py:765] (7/8) Epoch 14, batch 1800, train_loss[loss=3.524, NarTop10Accuracy=0.6126, over 7217.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6172, over 5992.32 frames. ], batch size: 22, lr: 6.09e-03 +2024-08-06 09:51:55,729 INFO [trainer.py:765] (7/8) Epoch 14, batch 1900, train_loss[loss=3.794, NarTop10Accuracy=0.5685, over 5765.00 frames. ], tot_loss[loss=3.515, NarTop10Accuracy=0.6141, over 6046.35 frames. 
], batch size: 49, lr: 6.08e-03 +2024-08-06 09:52:21,503 INFO [trainer.py:765] (7/8) Epoch 14, batch 2000, train_loss[loss=3.715, NarTop10Accuracy=0.5771, over 6171.00 frames. ], tot_loss[loss=3.522, NarTop10Accuracy=0.6129, over 6021.51 frames. ], batch size: 48, lr: 6.07e-03 +2024-08-06 09:52:47,012 INFO [trainer.py:765] (7/8) Epoch 14, batch 2100, train_loss[loss=3.344, NarTop10Accuracy=0.6336, over 3951.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6152, over 5994.80 frames. ], batch size: 4, lr: 6.06e-03 +2024-08-06 09:53:12,481 INFO [trainer.py:765] (7/8) Epoch 14, batch 2200, train_loss[loss=3.294, NarTop10Accuracy=0.6615, over 7396.00 frames. ], tot_loss[loss=3.518, NarTop10Accuracy=0.6134, over 6026.35 frames. ], batch size: 31, lr: 6.05e-03 +2024-08-06 09:53:37,975 INFO [trainer.py:765] (7/8) Epoch 14, batch 2300, train_loss[loss=3.347, NarTop10Accuracy=0.6409, over 5840.00 frames. ], tot_loss[loss=3.525, NarTop10Accuracy=0.6119, over 6057.29 frames. ], batch size: 9, lr: 6.05e-03 +2024-08-06 09:54:02,717 INFO [trainer.py:765] (7/8) Epoch 14, batch 2400, train_loss[loss=3.504, NarTop10Accuracy=0.612, over 5114.00 frames. ], tot_loss[loss=3.534, NarTop10Accuracy=0.6103, over 5869.80 frames. ], batch size: 7, lr: 6.04e-03 +2024-08-06 09:54:12,822 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 09:54:24,304 INFO [trainer.py:811] (7/8) Epoch 14, validation: loss=3.364, NarTop10Accuracy=0.6477, over 1907754.00 frames. +2024-08-06 09:54:24,304 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 09:54:24,752 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.815e+02 1.970e+02 2.165e+02 3.684e+02, threshold=3.939e+02, percent-clipped=0.0 +2024-08-06 09:54:37,619 INFO [trainer.py:765] (7/8) Epoch 14, batch 2500, train_loss[loss=3.617, NarTop10Accuracy=0.6123, over 4313.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6156, over 5534.17 frames. ], batch size: 5, lr: 6.03e-03 +2024-08-06 09:54:58,735 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 09:56:03,095 INFO [trainer.py:765] (7/8) Epoch 15, batch 100, train_loss[loss=3.696, NarTop10Accuracy=0.5839, over 7449.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6272, over 2381.03 frames. ], batch size: 31, lr: 5.81e-03 +2024-08-06 09:56:35,978 INFO [trainer.py:765] (7/8) Epoch 15, batch 200, train_loss[loss=3.4, NarTop10Accuracy=0.6431, over 6917.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6275, over 3890.33 frames. ], batch size: 17, lr: 5.81e-03 +2024-08-06 09:57:07,652 INFO [trainer.py:765] (7/8) Epoch 15, batch 300, train_loss[loss=3.202, NarTop10Accuracy=0.681, over 7129.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6279, over 4692.55 frames. ], batch size: 22, lr: 5.80e-03 +2024-08-06 09:57:38,462 INFO [trainer.py:765] (7/8) Epoch 15, batch 400, train_loss[loss=3.632, NarTop10Accuracy=0.5963, over 5251.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6291, over 5132.88 frames. ], batch size: 7, lr: 5.79e-03 +2024-08-06 09:58:12,233 INFO [trainer.py:765] (7/8) Epoch 15, batch 500, train_loss[loss=3.556, NarTop10Accuracy=0.6035, over 6145.00 frames. ], tot_loss[loss=3.466, NarTop10Accuracy=0.6247, over 5408.63 frames. ], batch size: 11, lr: 5.78e-03 +2024-08-06 09:58:47,542 INFO [trainer.py:765] (7/8) Epoch 15, batch 600, train_loss[loss=3.558, NarTop10Accuracy=0.6141, over 5803.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6239, over 5683.12 frames. 
], batch size: 9, lr: 5.77e-03 +2024-08-06 09:59:17,061 INFO [trainer.py:765] (7/8) Epoch 15, batch 700, train_loss[loss=3.386, NarTop10Accuracy=0.6395, over 5086.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.6216, over 5746.74 frames. ], batch size: 6, lr: 5.77e-03 +2024-08-06 09:59:55,588 INFO [trainer.py:765] (7/8) Epoch 15, batch 800, train_loss[loss=3.616, NarTop10Accuracy=0.5883, over 5074.00 frames. ], tot_loss[loss=3.481, NarTop10Accuracy=0.6217, over 5796.16 frames. ], batch size: 6, lr: 5.76e-03 +2024-08-06 10:00:32,024 INFO [trainer.py:765] (7/8) Epoch 15, batch 900, train_loss[loss=3.201, NarTop10Accuracy=0.6683, over 6283.00 frames. ], tot_loss[loss=3.478, NarTop10Accuracy=0.6217, over 5826.83 frames. ], batch size: 13, lr: 5.75e-03 +2024-08-06 10:01:05,539 INFO [trainer.py:765] (7/8) Epoch 15, batch 1000, train_loss[loss=3.385, NarTop10Accuracy=0.6407, over 6610.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6217, over 5912.39 frames. ], batch size: 14, lr: 5.74e-03 +2024-08-06 10:01:45,153 INFO [trainer.py:765] (7/8) Epoch 15, batch 1100, train_loss[loss=3.554, NarTop10Accuracy=0.6007, over 6767.00 frames. ], tot_loss[loss=3.496, NarTop10Accuracy=0.618, over 5947.68 frames. ], batch size: 17, lr: 5.74e-03 +2024-08-06 10:02:18,756 INFO [trainer.py:765] (7/8) Epoch 15, batch 1200, train_loss[loss=3.871, NarTop10Accuracy=0.5373, over 6962.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.619, over 5950.52 frames. ], batch size: 31, lr: 5.73e-03 +2024-08-06 10:02:51,921 INFO [trainer.py:765] (7/8) Epoch 15, batch 1300, train_loss[loss=3.519, NarTop10Accuracy=0.6158, over 4890.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6188, over 6019.97 frames. ], batch size: 6, lr: 5.72e-03 +2024-08-06 10:03:25,435 INFO [trainer.py:765] (7/8) Epoch 15, batch 1400, train_loss[loss=3.808, NarTop10Accuracy=0.5596, over 6235.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.617, over 6037.34 frames. ], batch size: 11, lr: 5.71e-03 +2024-08-06 10:03:59,041 INFO [trainer.py:765] (7/8) Epoch 15, batch 1500, train_loss[loss=3.511, NarTop10Accuracy=0.6203, over 6074.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6183, over 5971.22 frames. ], batch size: 49, lr: 5.71e-03 +2024-08-06 10:04:27,106 INFO [trainer.py:765] (7/8) Epoch 15, batch 1600, train_loss[loss=3.616, NarTop10Accuracy=0.5921, over 7115.00 frames. ], tot_loss[loss=3.48, NarTop10Accuracy=0.621, over 5953.55 frames. ], batch size: 22, lr: 5.70e-03 +2024-08-06 10:04:53,907 INFO [trainer.py:765] (7/8) Epoch 15, batch 1700, train_loss[loss=3.449, NarTop10Accuracy=0.6319, over 6190.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.62, over 5945.85 frames. ], batch size: 13, lr: 5.69e-03 +2024-08-06 10:05:20,728 INFO [trainer.py:765] (7/8) Epoch 15, batch 1800, train_loss[loss=3.753, NarTop10Accuracy=0.5624, over 7070.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6157, over 5990.63 frames. ], batch size: 22, lr: 5.68e-03 +2024-08-06 10:05:37,266 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:05:47,411 INFO [trainer.py:811] (7/8) Epoch 15, validation: loss=3.325, NarTop10Accuracy=0.6551, over 1907754.00 frames. 
+2024-08-06 10:05:47,412 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:05:47,919 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 1.835e+02 1.986e+02 2.156e+02 4.531e+02, threshold=3.972e+02, percent-clipped=0.1 +2024-08-06 10:05:57,568 INFO [trainer.py:765] (7/8) Epoch 15, batch 1900, train_loss[loss=3.597, NarTop10Accuracy=0.6033, over 6109.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.6161, over 6040.71 frames. ], batch size: 50, lr: 5.68e-03 +2024-08-06 10:06:23,371 INFO [trainer.py:765] (7/8) Epoch 15, batch 2000, train_loss[loss=3.462, NarTop10Accuracy=0.6339, over 5910.00 frames. ], tot_loss[loss=3.501, NarTop10Accuracy=0.6171, over 6013.17 frames. ], batch size: 48, lr: 5.67e-03 +2024-08-06 10:06:48,758 INFO [trainer.py:765] (7/8) Epoch 15, batch 2100, train_loss[loss=3.268, NarTop10Accuracy=0.6523, over 4035.00 frames. ], tot_loss[loss=3.504, NarTop10Accuracy=0.6165, over 5986.57 frames. ], batch size: 4, lr: 5.66e-03 +2024-08-06 10:07:14,170 INFO [trainer.py:765] (7/8) Epoch 15, batch 2200, train_loss[loss=3.44, NarTop10Accuracy=0.6298, over 7079.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6183, over 6034.77 frames. ], batch size: 30, lr: 5.65e-03 +2024-08-06 10:07:39,628 INFO [trainer.py:765] (7/8) Epoch 15, batch 2300, train_loss[loss=3.441, NarTop10Accuracy=0.6312, over 5726.00 frames. ], tot_loss[loss=3.507, NarTop10Accuracy=0.616, over 6056.84 frames. ], batch size: 9, lr: 5.65e-03 +2024-08-06 10:08:04,360 INFO [trainer.py:765] (7/8) Epoch 15, batch 2400, train_loss[loss=3.509, NarTop10Accuracy=0.6056, over 5679.00 frames. ], tot_loss[loss=3.508, NarTop10Accuracy=0.6159, over 5866.54 frames. ], batch size: 8, lr: 5.64e-03 +2024-08-06 10:08:27,712 INFO [trainer.py:765] (7/8) Epoch 15, batch 2500, train_loss[loss=3.68, NarTop10Accuracy=0.5811, over 4355.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.6178, over 5526.39 frames. ], batch size: 5, lr: 5.63e-03 +2024-08-06 10:08:48,921 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:09:44,183 INFO [trainer.py:765] (7/8) Epoch 16, batch 100, train_loss[loss=3.393, NarTop10Accuracy=0.6406, over 7435.00 frames. ], tot_loss[loss=3.448, NarTop10Accuracy=0.6289, over 2372.45 frames. ], batch size: 31, lr: 5.44e-03 +2024-08-06 10:10:23,207 INFO [trainer.py:765] (7/8) Epoch 16, batch 200, train_loss[loss=3.415, NarTop10Accuracy=0.638, over 6782.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6315, over 3866.28 frames. ], batch size: 17, lr: 5.44e-03 +2024-08-06 10:10:58,841 INFO [trainer.py:765] (7/8) Epoch 16, batch 300, train_loss[loss=3.235, NarTop10Accuracy=0.6796, over 7274.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6301, over 4669.11 frames. ], batch size: 22, lr: 5.43e-03 +2024-08-06 10:11:29,594 INFO [trainer.py:765] (7/8) Epoch 16, batch 400, train_loss[loss=3.218, NarTop10Accuracy=0.676, over 5211.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6291, over 5108.98 frames. ], batch size: 7, lr: 5.42e-03 +2024-08-06 10:12:02,298 INFO [trainer.py:765] (7/8) Epoch 16, batch 500, train_loss[loss=3.729, NarTop10Accuracy=0.5611, over 6185.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6301, over 5393.91 frames. ], batch size: 11, lr: 5.42e-03 +2024-08-06 10:12:42,340 INFO [trainer.py:765] (7/8) Epoch 16, batch 600, train_loss[loss=3.366, NarTop10Accuracy=0.6512, over 5835.00 frames. ], tot_loss[loss=3.441, NarTop10Accuracy=0.6299, over 5658.31 frames. 
], batch size: 9, lr: 5.41e-03 +2024-08-06 10:13:13,949 INFO [trainer.py:765] (7/8) Epoch 16, batch 700, train_loss[loss=3.365, NarTop10Accuracy=0.6412, over 4979.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6258, over 5744.47 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:13:46,285 INFO [trainer.py:765] (7/8) Epoch 16, batch 800, train_loss[loss=3.437, NarTop10Accuracy=0.6241, over 5042.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6274, over 5795.05 frames. ], batch size: 6, lr: 5.40e-03 +2024-08-06 10:14:23,295 INFO [trainer.py:765] (7/8) Epoch 16, batch 900, train_loss[loss=3.746, NarTop10Accuracy=0.5657, over 6696.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.628, over 5821.82 frames. ], batch size: 14, lr: 5.39e-03 +2024-08-06 10:15:00,059 INFO [trainer.py:765] (7/8) Epoch 16, batch 1000, train_loss[loss=3.621, NarTop10Accuracy=0.5926, over 6624.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6263, over 5923.29 frames. ], batch size: 14, lr: 5.38e-03 +2024-08-06 10:15:30,509 INFO [trainer.py:765] (7/8) Epoch 16, batch 1100, train_loss[loss=3.194, NarTop10Accuracy=0.6808, over 7068.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.622, over 5971.03 frames. ], batch size: 18, lr: 5.38e-03 +2024-08-06 10:16:11,383 INFO [trainer.py:765] (7/8) Epoch 16, batch 1200, train_loss[loss=3.48, NarTop10Accuracy=0.6142, over 7312.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6216, over 5956.02 frames. ], batch size: 31, lr: 5.37e-03 +2024-08-06 10:16:39,396 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:16:49,676 INFO [trainer.py:811] (7/8) Epoch 16, validation: loss=3.375, NarTop10Accuracy=0.6455, over 1907754.00 frames. +2024-08-06 10:16:49,676 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:16:52,482 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 1.814e+02 1.975e+02 2.151e+02 4.776e+02, threshold=3.950e+02, percent-clipped=0.2 +2024-08-06 10:16:58,042 INFO [trainer.py:765] (7/8) Epoch 16, batch 1300, train_loss[loss=3.457, NarTop10Accuracy=0.6169, over 5098.00 frames. ], tot_loss[loss=3.468, NarTop10Accuracy=0.6235, over 6008.44 frames. ], batch size: 6, lr: 5.36e-03 +2024-08-06 10:17:29,375 INFO [trainer.py:765] (7/8) Epoch 16, batch 1400, train_loss[loss=3.354, NarTop10Accuracy=0.6381, over 6196.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6237, over 6030.73 frames. ], batch size: 11, lr: 5.36e-03 +2024-08-06 10:18:02,353 INFO [trainer.py:765] (7/8) Epoch 16, batch 1500, train_loss[loss=3.585, NarTop10Accuracy=0.6045, over 6633.00 frames. ], tot_loss[loss=3.465, NarTop10Accuracy=0.624, over 5946.43 frames. ], batch size: 50, lr: 5.35e-03 +2024-08-06 10:18:30,469 INFO [trainer.py:765] (7/8) Epoch 16, batch 1600, train_loss[loss=3.734, NarTop10Accuracy=0.5717, over 7121.00 frames. ], tot_loss[loss=3.483, NarTop10Accuracy=0.6205, over 5939.93 frames. ], batch size: 22, lr: 5.34e-03 +2024-08-06 10:18:57,272 INFO [trainer.py:765] (7/8) Epoch 16, batch 1700, train_loss[loss=3.687, NarTop10Accuracy=0.5759, over 6524.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6223, over 5930.65 frames. ], batch size: 14, lr: 5.34e-03 +2024-08-06 10:19:23,979 INFO [trainer.py:765] (7/8) Epoch 16, batch 1800, train_loss[loss=3.714, NarTop10Accuracy=0.5775, over 7031.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6212, over 5994.55 frames. 
], batch size: 22, lr: 5.33e-03 +2024-08-06 10:19:50,772 INFO [trainer.py:765] (7/8) Epoch 16, batch 1900, train_loss[loss=3.65, NarTop10Accuracy=0.5848, over 6038.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6217, over 6025.71 frames. ], batch size: 50, lr: 5.32e-03 +2024-08-06 10:20:16,601 INFO [trainer.py:765] (7/8) Epoch 16, batch 2000, train_loss[loss=3.499, NarTop10Accuracy=0.6229, over 5775.00 frames. ], tot_loss[loss=3.497, NarTop10Accuracy=0.6177, over 5998.91 frames. ], batch size: 48, lr: 5.32e-03 +2024-08-06 10:20:42,160 INFO [trainer.py:765] (7/8) Epoch 16, batch 2100, train_loss[loss=3.352, NarTop10Accuracy=0.6348, over 4789.00 frames. ], tot_loss[loss=3.505, NarTop10Accuracy=0.6161, over 5996.45 frames. ], batch size: 5, lr: 5.31e-03 +2024-08-06 10:21:07,651 INFO [trainer.py:765] (7/8) Epoch 16, batch 2200, train_loss[loss=3.397, NarTop10Accuracy=0.6459, over 7139.00 frames. ], tot_loss[loss=3.502, NarTop10Accuracy=0.6172, over 6037.74 frames. ], batch size: 30, lr: 5.30e-03 +2024-08-06 10:21:36,081 INFO [trainer.py:765] (7/8) Epoch 16, batch 2300, train_loss[loss=3.637, NarTop10Accuracy=0.5852, over 5733.00 frames. ], tot_loss[loss=3.516, NarTop10Accuracy=0.6145, over 6056.75 frames. ], batch size: 9, lr: 5.30e-03 +2024-08-06 10:22:00,907 INFO [trainer.py:765] (7/8) Epoch 16, batch 2400, train_loss[loss=3.221, NarTop10Accuracy=0.6759, over 5106.00 frames. ], tot_loss[loss=3.511, NarTop10Accuracy=0.6154, over 5883.65 frames. ], batch size: 7, lr: 5.29e-03 +2024-08-06 10:22:24,289 INFO [trainer.py:765] (7/8) Epoch 16, batch 2500, train_loss[loss=3.615, NarTop10Accuracy=0.602, over 5099.00 frames. ], tot_loss[loss=3.492, NarTop10Accuracy=0.6192, over 5539.31 frames. ], batch size: 6, lr: 5.28e-03 +2024-08-06 10:22:45,438 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:23:45,726 INFO [trainer.py:765] (7/8) Epoch 17, batch 100, train_loss[loss=3.386, NarTop10Accuracy=0.6442, over 7391.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6314, over 2360.49 frames. ], batch size: 31, lr: 5.12e-03 +2024-08-06 10:24:19,032 INFO [trainer.py:765] (7/8) Epoch 17, batch 200, train_loss[loss=3.404, NarTop10Accuracy=0.6435, over 6900.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6344, over 3869.10 frames. ], batch size: 17, lr: 5.11e-03 +2024-08-06 10:24:53,440 INFO [trainer.py:765] (7/8) Epoch 17, batch 300, train_loss[loss=3.494, NarTop10Accuracy=0.6216, over 7109.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6331, over 4670.76 frames. ], batch size: 22, lr: 5.10e-03 +2024-08-06 10:25:28,012 INFO [trainer.py:765] (7/8) Epoch 17, batch 400, train_loss[loss=3.475, NarTop10Accuracy=0.6264, over 5194.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6347, over 5117.82 frames. ], batch size: 7, lr: 5.10e-03 +2024-08-06 10:25:58,605 INFO [trainer.py:765] (7/8) Epoch 17, batch 500, train_loss[loss=3.471, NarTop10Accuracy=0.6168, over 6173.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6327, over 5406.56 frames. ], batch size: 11, lr: 5.09e-03 +2024-08-06 10:26:29,755 INFO [trainer.py:765] (7/8) Epoch 17, batch 600, train_loss[loss=3.723, NarTop10Accuracy=0.5724, over 5819.00 frames. ], tot_loss[loss=3.428, NarTop10Accuracy=0.6328, over 5674.85 frames. ], batch size: 9, lr: 5.09e-03 +2024-08-06 10:27:07,498 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:27:17,547 INFO [trainer.py:811] (7/8) Epoch 17, validation: loss=3.327, NarTop10Accuracy=0.6554, over 1907754.00 frames. 
+2024-08-06 10:27:17,548 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:27:18,066 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.825e+02 1.985e+02 2.150e+02 4.169e+02, threshold=3.970e+02, percent-clipped=0.2 +2024-08-06 10:27:18,071 INFO [trainer.py:765] (7/8) Epoch 17, batch 700, train_loss[loss=3.254, NarTop10Accuracy=0.6661, over 5070.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6296, over 5736.89 frames. ], batch size: 6, lr: 5.08e-03 +2024-08-06 10:27:49,841 INFO [trainer.py:765] (7/8) Epoch 17, batch 800, train_loss[loss=3.185, NarTop10Accuracy=0.6716, over 5126.00 frames. ], tot_loss[loss=3.442, NarTop10Accuracy=0.6293, over 5799.95 frames. ], batch size: 6, lr: 5.07e-03 +2024-08-06 10:28:24,837 INFO [trainer.py:765] (7/8) Epoch 17, batch 900, train_loss[loss=3.252, NarTop10Accuracy=0.6621, over 6162.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6278, over 5828.77 frames. ], batch size: 13, lr: 5.07e-03 +2024-08-06 10:28:59,683 INFO [trainer.py:765] (7/8) Epoch 17, batch 1000, train_loss[loss=3.462, NarTop10Accuracy=0.6215, over 6220.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6287, over 5930.89 frames. ], batch size: 13, lr: 5.06e-03 +2024-08-06 10:29:36,658 INFO [trainer.py:765] (7/8) Epoch 17, batch 1100, train_loss[loss=3.33, NarTop10Accuracy=0.6638, over 6771.00 frames. ], tot_loss[loss=3.459, NarTop10Accuracy=0.6255, over 5941.44 frames. ], batch size: 17, lr: 5.06e-03 +2024-08-06 10:30:08,240 INFO [trainer.py:765] (7/8) Epoch 17, batch 1200, train_loss[loss=3.39, NarTop10Accuracy=0.6346, over 7505.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6251, over 5944.89 frames. ], batch size: 31, lr: 5.05e-03 +2024-08-06 10:30:47,101 INFO [trainer.py:765] (7/8) Epoch 17, batch 1300, train_loss[loss=3.466, NarTop10Accuracy=0.6313, over 5120.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6244, over 6010.78 frames. ], batch size: 6, lr: 5.04e-03 +2024-08-06 10:31:20,892 INFO [trainer.py:765] (7/8) Epoch 17, batch 1400, train_loss[loss=3.44, NarTop10Accuracy=0.6215, over 6129.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.6244, over 6041.60 frames. ], batch size: 11, lr: 5.04e-03 +2024-08-06 10:31:51,400 INFO [trainer.py:765] (7/8) Epoch 17, batch 1500, train_loss[loss=3.493, NarTop10Accuracy=0.6298, over 6294.00 frames. ], tot_loss[loss=3.461, NarTop10Accuracy=0.625, over 5972.68 frames. ], batch size: 49, lr: 5.03e-03 +2024-08-06 10:32:19,399 INFO [trainer.py:765] (7/8) Epoch 17, batch 1600, train_loss[loss=3.464, NarTop10Accuracy=0.6133, over 7095.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6222, over 5939.03 frames. ], batch size: 22, lr: 5.03e-03 +2024-08-06 10:32:50,393 INFO [trainer.py:765] (7/8) Epoch 17, batch 1700, train_loss[loss=3.697, NarTop10Accuracy=0.5771, over 6296.00 frames. ], tot_loss[loss=3.49, NarTop10Accuracy=0.6195, over 5927.11 frames. ], batch size: 13, lr: 5.02e-03 +2024-08-06 10:33:17,035 INFO [trainer.py:765] (7/8) Epoch 17, batch 1800, train_loss[loss=3.722, NarTop10Accuracy=0.5657, over 6985.00 frames. ], tot_loss[loss=3.498, NarTop10Accuracy=0.618, over 5993.14 frames. ], batch size: 22, lr: 5.02e-03 +2024-08-06 10:33:43,596 INFO [trainer.py:765] (7/8) Epoch 17, batch 1900, train_loss[loss=4.003, NarTop10Accuracy=0.516, over 6081.00 frames. ], tot_loss[loss=3.487, NarTop10Accuracy=0.6202, over 6033.72 frames. 
], batch size: 49, lr: 5.01e-03 +2024-08-06 10:34:09,287 INFO [trainer.py:765] (7/8) Epoch 17, batch 2000, train_loss[loss=3.898, NarTop10Accuracy=0.5371, over 5433.00 frames. ], tot_loss[loss=3.485, NarTop10Accuracy=0.6209, over 5992.29 frames. ], batch size: 48, lr: 5.00e-03 +2024-08-06 10:34:34,801 INFO [trainer.py:765] (7/8) Epoch 17, batch 2100, train_loss[loss=3.139, NarTop10Accuracy=0.6749, over 3914.00 frames. ], tot_loss[loss=3.495, NarTop10Accuracy=0.6183, over 5982.75 frames. ], batch size: 4, lr: 5.00e-03 +2024-08-06 10:35:00,244 INFO [trainer.py:765] (7/8) Epoch 17, batch 2200, train_loss[loss=3.599, NarTop10Accuracy=0.6042, over 7255.00 frames. ], tot_loss[loss=3.484, NarTop10Accuracy=0.6209, over 6034.38 frames. ], batch size: 30, lr: 4.99e-03 +2024-08-06 10:35:25,732 INFO [trainer.py:765] (7/8) Epoch 17, batch 2300, train_loss[loss=3.472, NarTop10Accuracy=0.6259, over 5773.00 frames. ], tot_loss[loss=3.494, NarTop10Accuracy=0.6194, over 6066.21 frames. ], batch size: 9, lr: 4.99e-03 +2024-08-06 10:35:50,526 INFO [trainer.py:765] (7/8) Epoch 17, batch 2400, train_loss[loss=3.711, NarTop10Accuracy=0.5759, over 5119.00 frames. ], tot_loss[loss=3.503, NarTop10Accuracy=0.6178, over 5875.68 frames. ], batch size: 7, lr: 4.98e-03 +2024-08-06 10:36:14,104 INFO [trainer.py:765] (7/8) Epoch 17, batch 2500, train_loss[loss=3.638, NarTop10Accuracy=0.5895, over 5176.00 frames. ], tot_loss[loss=3.488, NarTop10Accuracy=0.6206, over 5534.64 frames. ], batch size: 6, lr: 4.98e-03 +2024-08-06 10:36:35,062 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:37:32,051 INFO [trainer.py:765] (7/8) Epoch 18, batch 100, train_loss[loss=3.305, NarTop10Accuracy=0.6615, over 7588.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.638, over 2369.44 frames. ], batch size: 31, lr: 4.83e-03 +2024-08-06 10:37:39,161 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:37:49,085 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=3.339, NarTop10Accuracy=0.6526, over 1907754.00 frames. +2024-08-06 10:37:49,085 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:37:49,684 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 1.841e+02 1.993e+02 2.161e+02 3.871e+02, threshold=3.985e+02, percent-clipped=0.0 +2024-08-06 10:38:18,144 INFO [trainer.py:765] (7/8) Epoch 18, batch 200, train_loss[loss=3.382, NarTop10Accuracy=0.6413, over 6858.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6356, over 3878.20 frames. ], batch size: 17, lr: 4.82e-03 +2024-08-06 10:38:50,198 INFO [trainer.py:765] (7/8) Epoch 18, batch 300, train_loss[loss=3.519, NarTop10Accuracy=0.6158, over 7061.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6358, over 4674.66 frames. ], batch size: 22, lr: 4.81e-03 +2024-08-06 10:39:23,743 INFO [trainer.py:765] (7/8) Epoch 18, batch 400, train_loss[loss=3.002, NarTop10Accuracy=0.7104, over 5059.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6354, over 5119.23 frames. ], batch size: 7, lr: 4.81e-03 +2024-08-06 10:39:54,103 INFO [trainer.py:765] (7/8) Epoch 18, batch 500, train_loss[loss=3.408, NarTop10Accuracy=0.6422, over 6211.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.636, over 5415.18 frames. ], batch size: 11, lr: 4.80e-03 +2024-08-06 10:40:28,526 INFO [trainer.py:765] (7/8) Epoch 18, batch 600, train_loss[loss=3.506, NarTop10Accuracy=0.6208, over 5790.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6371, over 5684.30 frames. 
], batch size: 9, lr: 4.80e-03 +2024-08-06 10:41:02,143 INFO [trainer.py:765] (7/8) Epoch 18, batch 700, train_loss[loss=3.33, NarTop10Accuracy=0.6601, over 5057.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6335, over 5749.56 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:41:38,519 INFO [trainer.py:765] (7/8) Epoch 18, batch 800, train_loss[loss=3.475, NarTop10Accuracy=0.6272, over 4945.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6325, over 5810.61 frames. ], batch size: 6, lr: 4.79e-03 +2024-08-06 10:42:12,611 INFO [trainer.py:765] (7/8) Epoch 18, batch 900, train_loss[loss=3.366, NarTop10Accuracy=0.6485, over 6674.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6303, over 5835.47 frames. ], batch size: 14, lr: 4.78e-03 +2024-08-06 10:42:46,703 INFO [trainer.py:765] (7/8) Epoch 18, batch 1000, train_loss[loss=3.365, NarTop10Accuracy=0.6497, over 6236.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.63, over 5923.35 frames. ], batch size: 13, lr: 4.78e-03 +2024-08-06 10:43:24,182 INFO [trainer.py:765] (7/8) Epoch 18, batch 1100, train_loss[loss=3.561, NarTop10Accuracy=0.596, over 7098.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6257, over 5958.45 frames. ], batch size: 17, lr: 4.77e-03 +2024-08-06 10:44:02,363 INFO [trainer.py:765] (7/8) Epoch 18, batch 1200, train_loss[loss=3.537, NarTop10Accuracy=0.6111, over 7625.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6263, over 5956.81 frames. ], batch size: 32, lr: 4.77e-03 +2024-08-06 10:44:35,919 INFO [trainer.py:765] (7/8) Epoch 18, batch 1300, train_loss[loss=3.207, NarTop10Accuracy=0.6711, over 4941.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6282, over 6027.93 frames. ], batch size: 6, lr: 4.76e-03 +2024-08-06 10:45:10,238 INFO [trainer.py:765] (7/8) Epoch 18, batch 1400, train_loss[loss=3.364, NarTop10Accuracy=0.6471, over 6186.00 frames. ], tot_loss[loss=3.451, NarTop10Accuracy=0.627, over 6032.59 frames. ], batch size: 11, lr: 4.76e-03 +2024-08-06 10:45:40,975 INFO [trainer.py:765] (7/8) Epoch 18, batch 1500, train_loss[loss=3.818, NarTop10Accuracy=0.5506, over 6732.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.627, over 5983.19 frames. ], batch size: 49, lr: 4.75e-03 +2024-08-06 10:46:09,055 INFO [trainer.py:765] (7/8) Epoch 18, batch 1600, train_loss[loss=3.325, NarTop10Accuracy=0.6565, over 7217.00 frames. ], tot_loss[loss=3.463, NarTop10Accuracy=0.6242, over 5960.73 frames. ], batch size: 22, lr: 4.75e-03 +2024-08-06 10:46:35,858 INFO [trainer.py:765] (7/8) Epoch 18, batch 1700, train_loss[loss=3.805, NarTop10Accuracy=0.5542, over 6357.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.6257, over 5945.18 frames. ], batch size: 13, lr: 4.74e-03 +2024-08-06 10:47:02,438 INFO [trainer.py:765] (7/8) Epoch 18, batch 1800, train_loss[loss=3.476, NarTop10Accuracy=0.6232, over 7211.00 frames. ], tot_loss[loss=3.462, NarTop10Accuracy=0.625, over 5999.66 frames. ], batch size: 22, lr: 4.74e-03 +2024-08-06 10:47:29,093 INFO [trainer.py:765] (7/8) Epoch 18, batch 1900, train_loss[loss=3.705, NarTop10Accuracy=0.577, over 5397.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6223, over 6032.68 frames. ], batch size: 48, lr: 4.73e-03 +2024-08-06 10:47:54,884 INFO [trainer.py:765] (7/8) Epoch 18, batch 2000, train_loss[loss=3.565, NarTop10Accuracy=0.6066, over 6069.00 frames. ], tot_loss[loss=3.477, NarTop10Accuracy=0.6222, over 5997.29 frames. 
], batch size: 50, lr: 4.73e-03 +2024-08-06 10:48:20,370 INFO [trainer.py:765] (7/8) Epoch 18, batch 2100, train_loss[loss=3.388, NarTop10Accuracy=0.6428, over 4888.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6226, over 5990.89 frames. ], batch size: 5, lr: 4.72e-03 +2024-08-06 10:48:24,747 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:48:35,039 INFO [trainer.py:811] (7/8) Epoch 18, validation: loss=3.307, NarTop10Accuracy=0.6593, over 1907754.00 frames. +2024-08-06 10:48:35,040 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:48:35,535 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 1.855e+02 2.003e+02 2.193e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-08-06 10:48:56,096 INFO [trainer.py:765] (7/8) Epoch 18, batch 2200, train_loss[loss=3.426, NarTop10Accuracy=0.6318, over 7428.00 frames. ], tot_loss[loss=3.469, NarTop10Accuracy=0.6241, over 6019.93 frames. ], batch size: 31, lr: 4.72e-03 +2024-08-06 10:49:21,521 INFO [trainer.py:765] (7/8) Epoch 18, batch 2300, train_loss[loss=3.231, NarTop10Accuracy=0.6661, over 5718.00 frames. ], tot_loss[loss=3.476, NarTop10Accuracy=0.6231, over 6049.59 frames. ], batch size: 9, lr: 4.71e-03 +2024-08-06 10:49:46,256 INFO [trainer.py:765] (7/8) Epoch 18, batch 2400, train_loss[loss=3.455, NarTop10Accuracy=0.6312, over 5195.00 frames. ], tot_loss[loss=3.479, NarTop10Accuracy=0.6219, over 5862.63 frames. ], batch size: 7, lr: 4.71e-03 +2024-08-06 10:50:09,708 INFO [trainer.py:765] (7/8) Epoch 18, batch 2500, train_loss[loss=3.312, NarTop10Accuracy=0.6486, over 4975.00 frames. ], tot_loss[loss=3.47, NarTop10Accuracy=0.6236, over 5529.94 frames. ], batch size: 6, lr: 4.70e-03 +2024-08-06 10:50:31,071 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 10:51:33,564 INFO [trainer.py:765] (7/8) Epoch 19, batch 100, train_loss[loss=3.192, NarTop10Accuracy=0.6702, over 7295.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6372, over 2372.09 frames. ], batch size: 31, lr: 4.57e-03 +2024-08-06 10:52:06,164 INFO [trainer.py:765] (7/8) Epoch 19, batch 200, train_loss[loss=3.468, NarTop10Accuracy=0.6147, over 6953.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6376, over 3876.14 frames. ], batch size: 17, lr: 4.56e-03 +2024-08-06 10:52:40,031 INFO [trainer.py:765] (7/8) Epoch 19, batch 300, train_loss[loss=3.413, NarTop10Accuracy=0.6292, over 7208.00 frames. ], tot_loss[loss=3.408, NarTop10Accuracy=0.6372, over 4697.07 frames. ], batch size: 22, lr: 4.56e-03 +2024-08-06 10:53:12,829 INFO [trainer.py:765] (7/8) Epoch 19, batch 400, train_loss[loss=3.416, NarTop10Accuracy=0.6342, over 5712.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6352, over 5156.42 frames. ], batch size: 8, lr: 4.55e-03 +2024-08-06 10:53:45,020 INFO [trainer.py:765] (7/8) Epoch 19, batch 500, train_loss[loss=3.221, NarTop10Accuracy=0.659, over 6070.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6354, over 5430.38 frames. ], batch size: 11, lr: 4.55e-03 +2024-08-06 10:54:18,600 INFO [trainer.py:765] (7/8) Epoch 19, batch 600, train_loss[loss=3.423, NarTop10Accuracy=0.6341, over 5853.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6342, over 5684.18 frames. ], batch size: 9, lr: 4.54e-03 +2024-08-06 10:54:54,112 INFO [trainer.py:765] (7/8) Epoch 19, batch 700, train_loss[loss=3.416, NarTop10Accuracy=0.6228, over 5035.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.634, over 5733.57 frames. 
], batch size: 6, lr: 4.54e-03 +2024-08-06 10:55:29,925 INFO [trainer.py:765] (7/8) Epoch 19, batch 800, train_loss[loss=3.171, NarTop10Accuracy=0.6877, over 5004.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6343, over 5789.33 frames. ], batch size: 6, lr: 4.53e-03 +2024-08-06 10:56:02,238 INFO [trainer.py:765] (7/8) Epoch 19, batch 900, train_loss[loss=3.537, NarTop10Accuracy=0.6195, over 6715.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6331, over 5829.15 frames. ], batch size: 14, lr: 4.53e-03 +2024-08-06 10:56:38,299 INFO [trainer.py:765] (7/8) Epoch 19, batch 1000, train_loss[loss=3.256, NarTop10Accuracy=0.6748, over 6314.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6313, over 5922.08 frames. ], batch size: 13, lr: 4.52e-03 +2024-08-06 10:57:15,187 INFO [trainer.py:765] (7/8) Epoch 19, batch 1100, train_loss[loss=3.28, NarTop10Accuracy=0.6513, over 6927.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6286, over 5958.49 frames. ], batch size: 17, lr: 4.52e-03 +2024-08-06 10:57:46,665 INFO [trainer.py:765] (7/8) Epoch 19, batch 1200, train_loss[loss=3.291, NarTop10Accuracy=0.6577, over 7158.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6282, over 5963.77 frames. ], batch size: 31, lr: 4.51e-03 +2024-08-06 10:58:23,900 INFO [trainer.py:765] (7/8) Epoch 19, batch 1300, train_loss[loss=3.406, NarTop10Accuracy=0.6365, over 4918.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6295, over 6037.72 frames. ], batch size: 6, lr: 4.51e-03 +2024-08-06 10:58:58,028 INFO [trainer.py:765] (7/8) Epoch 19, batch 1400, train_loss[loss=3.144, NarTop10Accuracy=0.6912, over 6205.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6295, over 6065.63 frames. ], batch size: 11, lr: 4.50e-03 +2024-08-06 10:59:30,770 INFO [trainer.py:765] (7/8) Epoch 19, batch 1500, train_loss[loss=3.726, NarTop10Accuracy=0.5744, over 5748.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6296, over 5993.85 frames. ], batch size: 48, lr: 4.50e-03 +2024-08-06 10:59:40,831 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 10:59:50,899 INFO [trainer.py:811] (7/8) Epoch 19, validation: loss=3.276, NarTop10Accuracy=0.6653, over 1907754.00 frames. +2024-08-06 10:59:50,900 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 10:59:51,426 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 1.829e+02 1.984e+02 2.176e+02 3.542e+02, threshold=3.967e+02, percent-clipped=0.0 +2024-08-06 11:00:08,816 INFO [trainer.py:765] (7/8) Epoch 19, batch 1600, train_loss[loss=3.704, NarTop10Accuracy=0.5729, over 7045.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6294, over 5966.66 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:00:35,588 INFO [trainer.py:765] (7/8) Epoch 19, batch 1700, train_loss[loss=3.392, NarTop10Accuracy=0.6355, over 6183.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6283, over 5951.37 frames. ], batch size: 13, lr: 4.49e-03 +2024-08-06 11:01:02,257 INFO [trainer.py:765] (7/8) Epoch 19, batch 1800, train_loss[loss=3.213, NarTop10Accuracy=0.6678, over 7020.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6284, over 6008.68 frames. ], batch size: 22, lr: 4.49e-03 +2024-08-06 11:01:28,931 INFO [trainer.py:765] (7/8) Epoch 19, batch 1900, train_loss[loss=3.5, NarTop10Accuracy=0.6168, over 5989.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6262, over 6031.33 frames. 
], batch size: 51, lr: 4.48e-03 +2024-08-06 11:01:54,633 INFO [trainer.py:765] (7/8) Epoch 19, batch 2000, train_loss[loss=3.515, NarTop10Accuracy=0.6179, over 6246.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6277, over 6017.96 frames. ], batch size: 48, lr: 4.48e-03 +2024-08-06 11:02:20,186 INFO [trainer.py:765] (7/8) Epoch 19, batch 2100, train_loss[loss=3.287, NarTop10Accuracy=0.6592, over 4899.00 frames. ], tot_loss[loss=3.454, NarTop10Accuracy=0.6269, over 5995.68 frames. ], batch size: 5, lr: 4.47e-03 +2024-08-06 11:02:45,695 INFO [trainer.py:765] (7/8) Epoch 19, batch 2200, train_loss[loss=3.444, NarTop10Accuracy=0.6327, over 7282.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.627, over 6047.29 frames. ], batch size: 30, lr: 4.47e-03 +2024-08-06 11:03:11,131 INFO [trainer.py:765] (7/8) Epoch 19, batch 2300, train_loss[loss=3.211, NarTop10Accuracy=0.675, over 5754.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.626, over 6069.71 frames. ], batch size: 9, lr: 4.46e-03 +2024-08-06 11:03:35,950 INFO [trainer.py:765] (7/8) Epoch 19, batch 2400, train_loss[loss=3.403, NarTop10Accuracy=0.638, over 5118.00 frames. ], tot_loss[loss=3.475, NarTop10Accuracy=0.6228, over 5892.05 frames. ], batch size: 7, lr: 4.46e-03 +2024-08-06 11:03:59,406 INFO [trainer.py:765] (7/8) Epoch 19, batch 2500, train_loss[loss=3.681, NarTop10Accuracy=0.576, over 5133.00 frames. ], tot_loss[loss=3.432, NarTop10Accuracy=0.631, over 5561.22 frames. ], batch size: 6, lr: 4.45e-03 +2024-08-06 11:04:23,580 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:05:26,560 INFO [trainer.py:765] (7/8) Epoch 20, batch 100, train_loss[loss=3.49, NarTop10Accuracy=0.6112, over 7454.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6397, over 2369.09 frames. ], batch size: 32, lr: 4.33e-03 +2024-08-06 11:05:57,409 INFO [trainer.py:765] (7/8) Epoch 20, batch 200, train_loss[loss=3.344, NarTop10Accuracy=0.6406, over 6860.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6387, over 3864.10 frames. ], batch size: 17, lr: 4.33e-03 +2024-08-06 11:06:30,633 INFO [trainer.py:765] (7/8) Epoch 20, batch 300, train_loss[loss=3.249, NarTop10Accuracy=0.6736, over 7243.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6388, over 4672.24 frames. ], batch size: 22, lr: 4.32e-03 +2024-08-06 11:07:06,395 INFO [trainer.py:765] (7/8) Epoch 20, batch 400, train_loss[loss=3.463, NarTop10Accuracy=0.6287, over 5109.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6385, over 5116.86 frames. ], batch size: 7, lr: 4.32e-03 +2024-08-06 11:07:38,165 INFO [trainer.py:765] (7/8) Epoch 20, batch 500, train_loss[loss=3.428, NarTop10Accuracy=0.6434, over 6059.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6386, over 5395.44 frames. ], batch size: 11, lr: 4.31e-03 +2024-08-06 11:08:11,567 INFO [trainer.py:765] (7/8) Epoch 20, batch 600, train_loss[loss=3.281, NarTop10Accuracy=0.6695, over 5659.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6397, over 5667.79 frames. ], batch size: 9, lr: 4.31e-03 +2024-08-06 11:08:46,273 INFO [trainer.py:765] (7/8) Epoch 20, batch 700, train_loss[loss=3.296, NarTop10Accuracy=0.6596, over 5069.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6372, over 5733.29 frames. ], batch size: 6, lr: 4.31e-03 +2024-08-06 11:09:23,424 INFO [trainer.py:765] (7/8) Epoch 20, batch 800, train_loss[loss=3.401, NarTop10Accuracy=0.6281, over 5117.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6349, over 5799.64 frames. 
], batch size: 6, lr: 4.30e-03 +2024-08-06 11:09:53,512 INFO [trainer.py:765] (7/8) Epoch 20, batch 900, train_loss[loss=3.35, NarTop10Accuracy=0.6516, over 6232.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6337, over 5816.15 frames. ], batch size: 13, lr: 4.30e-03 +2024-08-06 11:10:12,198 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:10:23,738 INFO [trainer.py:811] (7/8) Epoch 20, validation: loss=3.279, NarTop10Accuracy=0.6658, over 1907754.00 frames. +2024-08-06 11:10:23,739 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 11:10:24,298 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.847e+02 2.007e+02 2.180e+02 4.417e+02, threshold=4.013e+02, percent-clipped=0.1 +2024-08-06 11:10:42,965 INFO [trainer.py:765] (7/8) Epoch 20, batch 1000, train_loss[loss=3.341, NarTop10Accuracy=0.6492, over 6295.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6344, over 5921.40 frames. ], batch size: 13, lr: 4.29e-03 +2024-08-06 11:11:21,022 INFO [trainer.py:765] (7/8) Epoch 20, batch 1100, train_loss[loss=3.414, NarTop10Accuracy=0.6405, over 6634.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6315, over 5958.03 frames. ], batch size: 17, lr: 4.29e-03 +2024-08-06 11:11:55,394 INFO [trainer.py:765] (7/8) Epoch 20, batch 1200, train_loss[loss=3.391, NarTop10Accuracy=0.6422, over 7305.00 frames. ], tot_loss[loss=3.444, NarTop10Accuracy=0.6289, over 5962.97 frames. ], batch size: 31, lr: 4.28e-03 +2024-08-06 11:12:30,752 INFO [trainer.py:765] (7/8) Epoch 20, batch 1300, train_loss[loss=3.527, NarTop10Accuracy=0.6097, over 5098.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6327, over 6018.81 frames. ], batch size: 6, lr: 4.28e-03 +2024-08-06 11:13:10,292 INFO [trainer.py:765] (7/8) Epoch 20, batch 1400, train_loss[loss=3.641, NarTop10Accuracy=0.5881, over 6079.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6296, over 6047.07 frames. ], batch size: 11, lr: 4.28e-03 +2024-08-06 11:13:38,990 INFO [trainer.py:765] (7/8) Epoch 20, batch 1500, train_loss[loss=3.658, NarTop10Accuracy=0.5926, over 6678.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.6295, over 5969.21 frames. ], batch size: 53, lr: 4.27e-03 +2024-08-06 11:14:07,052 INFO [trainer.py:765] (7/8) Epoch 20, batch 1600, train_loss[loss=3.409, NarTop10Accuracy=0.6194, over 7206.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6293, over 5934.55 frames. ], batch size: 22, lr: 4.27e-03 +2024-08-06 11:14:33,911 INFO [trainer.py:765] (7/8) Epoch 20, batch 1700, train_loss[loss=3.533, NarTop10Accuracy=0.6133, over 6219.00 frames. ], tot_loss[loss=3.434, NarTop10Accuracy=0.6308, over 5922.83 frames. ], batch size: 13, lr: 4.26e-03 +2024-08-06 11:15:00,591 INFO [trainer.py:765] (7/8) Epoch 20, batch 1800, train_loss[loss=3.409, NarTop10Accuracy=0.6455, over 7176.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6288, over 6003.58 frames. ], batch size: 22, lr: 4.26e-03 +2024-08-06 11:15:27,277 INFO [trainer.py:765] (7/8) Epoch 20, batch 1900, train_loss[loss=3.449, NarTop10Accuracy=0.6181, over 5990.00 frames. ], tot_loss[loss=3.464, NarTop10Accuracy=0.6246, over 6035.14 frames. ], batch size: 49, lr: 4.26e-03 +2024-08-06 11:15:56,438 INFO [trainer.py:765] (7/8) Epoch 20, batch 2000, train_loss[loss=3.557, NarTop10Accuracy=0.605, over 6901.00 frames. ], tot_loss[loss=3.453, NarTop10Accuracy=0.6269, over 6008.88 frames. 
], batch size: 49, lr: 4.25e-03 +2024-08-06 11:16:21,958 INFO [trainer.py:765] (7/8) Epoch 20, batch 2100, train_loss[loss=3.067, NarTop10Accuracy=0.69, over 3981.00 frames. ], tot_loss[loss=3.457, NarTop10Accuracy=0.626, over 6003.44 frames. ], batch size: 4, lr: 4.25e-03 +2024-08-06 11:16:47,406 INFO [trainer.py:765] (7/8) Epoch 20, batch 2200, train_loss[loss=3.372, NarTop10Accuracy=0.6409, over 7439.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6277, over 6042.78 frames. ], batch size: 30, lr: 4.24e-03 +2024-08-06 11:17:12,909 INFO [trainer.py:765] (7/8) Epoch 20, batch 2300, train_loss[loss=3.603, NarTop10Accuracy=0.5976, over 5773.00 frames. ], tot_loss[loss=3.458, NarTop10Accuracy=0.6259, over 6076.39 frames. ], batch size: 9, lr: 4.24e-03 +2024-08-06 11:17:37,715 INFO [trainer.py:765] (7/8) Epoch 20, batch 2400, train_loss[loss=3.158, NarTop10Accuracy=0.6852, over 5145.00 frames. ], tot_loss[loss=3.46, NarTop10Accuracy=0.6253, over 5874.91 frames. ], batch size: 7, lr: 4.24e-03 +2024-08-06 11:18:01,248 INFO [trainer.py:765] (7/8) Epoch 20, batch 2500, train_loss[loss=3.51, NarTop10Accuracy=0.6033, over 4909.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6312, over 5540.13 frames. ], batch size: 6, lr: 4.23e-03 +2024-08-06 11:18:22,206 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:19:21,459 INFO [trainer.py:765] (7/8) Epoch 21, batch 100, train_loss[loss=3.447, NarTop10Accuracy=0.63, over 7135.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6405, over 2378.27 frames. ], batch size: 30, lr: 4.12e-03 +2024-08-06 11:19:56,522 INFO [trainer.py:765] (7/8) Epoch 21, batch 200, train_loss[loss=3.361, NarTop10Accuracy=0.6427, over 6897.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6402, over 3867.32 frames. ], batch size: 17, lr: 4.12e-03 +2024-08-06 11:20:26,597 INFO [trainer.py:765] (7/8) Epoch 21, batch 300, train_loss[loss=3.624, NarTop10Accuracy=0.5887, over 7308.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6428, over 4663.28 frames. ], batch size: 22, lr: 4.11e-03 +2024-08-06 11:20:54,240 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:21:04,970 INFO [trainer.py:811] (7/8) Epoch 21, validation: loss=3.291, NarTop10Accuracy=0.6625, over 1907754.00 frames. +2024-08-06 11:21:04,970 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 11:21:05,486 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 1.858e+02 2.007e+02 2.193e+02 3.729e+02, threshold=4.015e+02, percent-clipped=0.0 +2024-08-06 11:21:12,220 INFO [trainer.py:765] (7/8) Epoch 21, batch 400, train_loss[loss=3.647, NarTop10Accuracy=0.5945, over 5134.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6398, over 5106.78 frames. ], batch size: 7, lr: 4.11e-03 +2024-08-06 11:21:47,568 INFO [trainer.py:765] (7/8) Epoch 21, batch 500, train_loss[loss=3.287, NarTop10Accuracy=0.6583, over 6031.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6423, over 5398.87 frames. ], batch size: 11, lr: 4.11e-03 +2024-08-06 11:22:18,237 INFO [trainer.py:765] (7/8) Epoch 21, batch 600, train_loss[loss=3.661, NarTop10Accuracy=0.5872, over 5679.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6407, over 5662.47 frames. ], batch size: 9, lr: 4.10e-03 +2024-08-06 11:22:56,842 INFO [trainer.py:765] (7/8) Epoch 21, batch 700, train_loss[loss=3.312, NarTop10Accuracy=0.652, over 5048.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6368, over 5735.39 frames. 
], batch size: 6, lr: 4.10e-03 +2024-08-06 11:23:33,074 INFO [trainer.py:765] (7/8) Epoch 21, batch 800, train_loss[loss=3.205, NarTop10Accuracy=0.6675, over 4950.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 5810.29 frames. ], batch size: 6, lr: 4.09e-03 +2024-08-06 11:24:03,021 INFO [trainer.py:765] (7/8) Epoch 21, batch 900, train_loss[loss=3.687, NarTop10Accuracy=0.5849, over 6217.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6356, over 5806.43 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:24:37,088 INFO [trainer.py:765] (7/8) Epoch 21, batch 1000, train_loss[loss=3.345, NarTop10Accuracy=0.6701, over 6180.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6327, over 5908.78 frames. ], batch size: 13, lr: 4.09e-03 +2024-08-06 11:25:16,427 INFO [trainer.py:765] (7/8) Epoch 21, batch 1100, train_loss[loss=3.51, NarTop10Accuracy=0.6149, over 6833.00 frames. ], tot_loss[loss=3.437, NarTop10Accuracy=0.6307, over 5933.92 frames. ], batch size: 17, lr: 4.08e-03 +2024-08-06 11:25:47,739 INFO [trainer.py:765] (7/8) Epoch 21, batch 1200, train_loss[loss=3.349, NarTop10Accuracy=0.6453, over 7417.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6329, over 5949.92 frames. ], batch size: 31, lr: 4.08e-03 +2024-08-06 11:26:23,056 INFO [trainer.py:765] (7/8) Epoch 21, batch 1300, train_loss[loss=3.494, NarTop10Accuracy=0.624, over 5002.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6341, over 6024.49 frames. ], batch size: 6, lr: 4.07e-03 +2024-08-06 11:27:00,081 INFO [trainer.py:765] (7/8) Epoch 21, batch 1400, train_loss[loss=3.443, NarTop10Accuracy=0.6365, over 6249.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6328, over 6034.08 frames. ], batch size: 11, lr: 4.07e-03 +2024-08-06 11:27:35,326 INFO [trainer.py:765] (7/8) Epoch 21, batch 1500, train_loss[loss=3.72, NarTop10Accuracy=0.5671, over 5909.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.633, over 5980.37 frames. ], batch size: 48, lr: 4.07e-03 +2024-08-06 11:28:03,314 INFO [trainer.py:765] (7/8) Epoch 21, batch 1600, train_loss[loss=3.232, NarTop10Accuracy=0.6691, over 7220.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6323, over 5950.36 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:28:30,104 INFO [trainer.py:765] (7/8) Epoch 21, batch 1700, train_loss[loss=3.58, NarTop10Accuracy=0.6061, over 6265.00 frames. ], tot_loss[loss=3.435, NarTop10Accuracy=0.631, over 5934.56 frames. ], batch size: 13, lr: 4.06e-03 +2024-08-06 11:28:56,640 INFO [trainer.py:765] (7/8) Epoch 21, batch 1800, train_loss[loss=3.595, NarTop10Accuracy=0.6059, over 7195.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.6289, over 5991.94 frames. ], batch size: 22, lr: 4.06e-03 +2024-08-06 11:29:23,197 INFO [trainer.py:765] (7/8) Epoch 21, batch 1900, train_loss[loss=3.602, NarTop10Accuracy=0.5927, over 5724.00 frames. ], tot_loss[loss=3.443, NarTop10Accuracy=0.6292, over 6023.90 frames. ], batch size: 48, lr: 4.05e-03 +2024-08-06 11:29:49,027 INFO [trainer.py:765] (7/8) Epoch 21, batch 2000, train_loss[loss=3.506, NarTop10Accuracy=0.6165, over 6452.00 frames. ], tot_loss[loss=3.438, NarTop10Accuracy=0.6298, over 6005.23 frames. ], batch size: 50, lr: 4.05e-03 +2024-08-06 11:30:14,528 INFO [trainer.py:765] (7/8) Epoch 21, batch 2100, train_loss[loss=3.78, NarTop10Accuracy=0.5679, over 3941.00 frames. ], tot_loss[loss=3.445, NarTop10Accuracy=0.629, over 5994.55 frames. 
], batch size: 4, lr: 4.04e-03 +2024-08-06 11:30:39,870 INFO [trainer.py:765] (7/8) Epoch 21, batch 2200, train_loss[loss=3.734, NarTop10Accuracy=0.5751, over 7259.00 frames. ], tot_loss[loss=3.449, NarTop10Accuracy=0.6276, over 6025.41 frames. ], batch size: 30, lr: 4.04e-03 +2024-08-06 11:31:05,471 INFO [trainer.py:765] (7/8) Epoch 21, batch 2300, train_loss[loss=3.309, NarTop10Accuracy=0.6553, over 5822.00 frames. ], tot_loss[loss=3.452, NarTop10Accuracy=0.6275, over 6059.66 frames. ], batch size: 9, lr: 4.04e-03 +2024-08-06 11:31:23,873 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:31:34,439 INFO [trainer.py:811] (7/8) Epoch 21, validation: loss=3.272, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 11:31:34,439 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 11:31:34,937 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.892e+02 2.038e+02 2.210e+02 4.910e+02, threshold=4.076e+02, percent-clipped=0.1 +2024-08-06 11:31:40,753 INFO [trainer.py:765] (7/8) Epoch 21, batch 2400, train_loss[loss=3.161, NarTop10Accuracy=0.684, over 5303.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6268, over 5881.77 frames. ], batch size: 7, lr: 4.03e-03 +2024-08-06 11:32:04,057 INFO [trainer.py:765] (7/8) Epoch 21, batch 2500, train_loss[loss=3.142, NarTop10Accuracy=0.6936, over 5005.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6307, over 5525.46 frames. ], batch size: 6, lr: 4.03e-03 +2024-08-06 11:32:25,609 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:33:29,683 INFO [trainer.py:765] (7/8) Epoch 22, batch 100, train_loss[loss=3.605, NarTop10Accuracy=0.6012, over 7073.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6445, over 2367.34 frames. ], batch size: 31, lr: 3.93e-03 +2024-08-06 11:34:05,037 INFO [trainer.py:765] (7/8) Epoch 22, batch 200, train_loss[loss=3.313, NarTop10Accuracy=0.6498, over 6897.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6458, over 3862.27 frames. ], batch size: 17, lr: 3.93e-03 +2024-08-06 11:34:37,619 INFO [trainer.py:765] (7/8) Epoch 22, batch 300, train_loss[loss=3.371, NarTop10Accuracy=0.6501, over 7189.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.645, over 4666.50 frames. ], batch size: 22, lr: 3.92e-03 +2024-08-06 11:35:09,969 INFO [trainer.py:765] (7/8) Epoch 22, batch 400, train_loss[loss=3.296, NarTop10Accuracy=0.6623, over 5016.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6426, over 5124.13 frames. ], batch size: 7, lr: 3.92e-03 +2024-08-06 11:35:42,508 INFO [trainer.py:765] (7/8) Epoch 22, batch 500, train_loss[loss=3.688, NarTop10Accuracy=0.579, over 6287.00 frames. ], tot_loss[loss=3.396, NarTop10Accuracy=0.6398, over 5398.92 frames. ], batch size: 11, lr: 3.91e-03 +2024-08-06 11:36:16,059 INFO [trainer.py:765] (7/8) Epoch 22, batch 600, train_loss[loss=3.237, NarTop10Accuracy=0.6783, over 5665.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6386, over 5674.97 frames. ], batch size: 9, lr: 3.91e-03 +2024-08-06 11:36:53,858 INFO [trainer.py:765] (7/8) Epoch 22, batch 700, train_loss[loss=3.262, NarTop10Accuracy=0.6692, over 5092.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6384, over 5741.39 frames. ], batch size: 6, lr: 3.91e-03 +2024-08-06 11:37:28,480 INFO [trainer.py:765] (7/8) Epoch 22, batch 800, train_loss[loss=3.218, NarTop10Accuracy=0.689, over 5029.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6372, over 5803.33 frames. 
], batch size: 6, lr: 3.90e-03 +2024-08-06 11:38:03,950 INFO [trainer.py:765] (7/8) Epoch 22, batch 900, train_loss[loss=3.413, NarTop10Accuracy=0.6327, over 6261.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6378, over 5813.44 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:38:38,329 INFO [trainer.py:765] (7/8) Epoch 22, batch 1000, train_loss[loss=3.271, NarTop10Accuracy=0.664, over 6356.00 frames. ], tot_loss[loss=3.41, NarTop10Accuracy=0.6361, over 5907.28 frames. ], batch size: 13, lr: 3.90e-03 +2024-08-06 11:39:14,789 INFO [trainer.py:765] (7/8) Epoch 22, batch 1100, train_loss[loss=3.429, NarTop10Accuracy=0.6326, over 6936.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6362, over 5947.01 frames. ], batch size: 17, lr: 3.89e-03 +2024-08-06 11:39:48,523 INFO [trainer.py:765] (7/8) Epoch 22, batch 1200, train_loss[loss=3.361, NarTop10Accuracy=0.6469, over 7325.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6365, over 5940.35 frames. ], batch size: 31, lr: 3.89e-03 +2024-08-06 11:40:25,246 INFO [trainer.py:765] (7/8) Epoch 22, batch 1300, train_loss[loss=3.212, NarTop10Accuracy=0.6685, over 5190.00 frames. ], tot_loss[loss=3.409, NarTop10Accuracy=0.6358, over 6020.27 frames. ], batch size: 6, lr: 3.89e-03 +2024-08-06 11:41:00,610 INFO [trainer.py:765] (7/8) Epoch 22, batch 1400, train_loss[loss=3.504, NarTop10Accuracy=0.6154, over 6123.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6344, over 6036.62 frames. ], batch size: 11, lr: 3.88e-03 +2024-08-06 11:41:31,585 INFO [trainer.py:765] (7/8) Epoch 22, batch 1500, train_loss[loss=3.633, NarTop10Accuracy=0.5937, over 6081.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6327, over 5974.64 frames. ], batch size: 49, lr: 3.88e-03 +2024-08-06 11:41:59,677 INFO [trainer.py:765] (7/8) Epoch 22, batch 1600, train_loss[loss=3.21, NarTop10Accuracy=0.6768, over 7044.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6319, over 5962.74 frames. ], batch size: 22, lr: 3.88e-03 +2024-08-06 11:42:26,464 INFO [trainer.py:765] (7/8) Epoch 22, batch 1700, train_loss[loss=3.307, NarTop10Accuracy=0.6622, over 6357.00 frames. ], tot_loss[loss=3.43, NarTop10Accuracy=0.6314, over 5954.01 frames. ], batch size: 13, lr: 3.87e-03 +2024-08-06 11:42:50,723 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:43:00,818 INFO [trainer.py:811] (7/8) Epoch 22, validation: loss=3.305, NarTop10Accuracy=0.6597, over 1907754.00 frames. +2024-08-06 11:43:00,819 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 11:43:01,327 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.500e+02 1.900e+02 2.042e+02 2.234e+02 3.494e+02, threshold=4.085e+02, percent-clipped=0.0 +2024-08-06 11:43:03,218 INFO [trainer.py:765] (7/8) Epoch 22, batch 1800, train_loss[loss=3.339, NarTop10Accuracy=0.6531, over 7198.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.631, over 6023.40 frames. ], batch size: 22, lr: 3.87e-03 +2024-08-06 11:43:29,751 INFO [trainer.py:765] (7/8) Epoch 22, batch 1900, train_loss[loss=3.721, NarTop10Accuracy=0.5688, over 6232.00 frames. ], tot_loss[loss=3.44, NarTop10Accuracy=0.6295, over 6036.35 frames. ], batch size: 50, lr: 3.87e-03 +2024-08-06 11:43:55,485 INFO [trainer.py:765] (7/8) Epoch 22, batch 2000, train_loss[loss=3.877, NarTop10Accuracy=0.5465, over 5904.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6314, over 6017.99 frames. 
], batch size: 48, lr: 3.86e-03 +2024-08-06 11:44:20,932 INFO [trainer.py:765] (7/8) Epoch 22, batch 2100, train_loss[loss=3.322, NarTop10Accuracy=0.6496, over 4912.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.633, over 5995.29 frames. ], batch size: 5, lr: 3.86e-03 +2024-08-06 11:44:46,456 INFO [trainer.py:765] (7/8) Epoch 22, batch 2200, train_loss[loss=3.633, NarTop10Accuracy=0.5874, over 7240.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.6323, over 6026.57 frames. ], batch size: 30, lr: 3.86e-03 +2024-08-06 11:45:11,882 INFO [trainer.py:765] (7/8) Epoch 22, batch 2300, train_loss[loss=3.191, NarTop10Accuracy=0.6641, over 5594.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6303, over 6060.79 frames. ], batch size: 9, lr: 3.85e-03 +2024-08-06 11:45:36,583 INFO [trainer.py:765] (7/8) Epoch 22, batch 2400, train_loss[loss=3.507, NarTop10Accuracy=0.6278, over 5143.00 frames. ], tot_loss[loss=3.455, NarTop10Accuracy=0.6265, over 5864.95 frames. ], batch size: 7, lr: 3.85e-03 +2024-08-06 11:46:00,081 INFO [trainer.py:765] (7/8) Epoch 22, batch 2500, train_loss[loss=3.182, NarTop10Accuracy=0.6817, over 5045.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6318, over 5524.89 frames. ], batch size: 6, lr: 3.85e-03 +2024-08-06 11:46:21,520 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 11:47:20,476 INFO [trainer.py:765] (7/8) Epoch 23, batch 100, train_loss[loss=3.232, NarTop10Accuracy=0.6629, over 7158.00 frames. ], tot_loss[loss=3.376, NarTop10Accuracy=0.6444, over 2372.03 frames. ], batch size: 30, lr: 3.75e-03 +2024-08-06 11:47:52,035 INFO [trainer.py:765] (7/8) Epoch 23, batch 200, train_loss[loss=3.46, NarTop10Accuracy=0.636, over 6806.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.649, over 3863.90 frames. ], batch size: 17, lr: 3.75e-03 +2024-08-06 11:48:33,921 INFO [trainer.py:765] (7/8) Epoch 23, batch 300, train_loss[loss=3.25, NarTop10Accuracy=0.6636, over 6975.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.646, over 4691.49 frames. ], batch size: 22, lr: 3.75e-03 +2024-08-06 11:49:06,656 INFO [trainer.py:765] (7/8) Epoch 23, batch 400, train_loss[loss=3.233, NarTop10Accuracy=0.6574, over 5127.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6431, over 5136.19 frames. ], batch size: 7, lr: 3.74e-03 +2024-08-06 11:49:37,618 INFO [trainer.py:765] (7/8) Epoch 23, batch 500, train_loss[loss=3.38, NarTop10Accuracy=0.6383, over 6070.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6453, over 5410.92 frames. ], batch size: 11, lr: 3.74e-03 +2024-08-06 11:50:06,740 INFO [trainer.py:765] (7/8) Epoch 23, batch 600, train_loss[loss=3.662, NarTop10Accuracy=0.5827, over 5854.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.6441, over 5682.89 frames. ], batch size: 9, lr: 3.74e-03 +2024-08-06 11:50:47,600 INFO [trainer.py:765] (7/8) Epoch 23, batch 700, train_loss[loss=3.212, NarTop10Accuracy=0.6764, over 4314.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.641, over 5744.52 frames. ], batch size: 5, lr: 3.73e-03 +2024-08-06 11:51:21,344 INFO [trainer.py:765] (7/8) Epoch 23, batch 800, train_loss[loss=3.046, NarTop10Accuracy=0.6999, over 5149.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6407, over 5802.05 frames. ], batch size: 6, lr: 3.73e-03 +2024-08-06 11:51:52,396 INFO [trainer.py:765] (7/8) Epoch 23, batch 900, train_loss[loss=3.177, NarTop10Accuracy=0.688, over 6661.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6412, over 5821.83 frames. 
], batch size: 14, lr: 3.73e-03 +2024-08-06 11:52:33,918 INFO [trainer.py:765] (7/8) Epoch 23, batch 1000, train_loss[loss=3.158, NarTop10Accuracy=0.6866, over 6780.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6411, over 5932.64 frames. ], batch size: 14, lr: 3.73e-03 +2024-08-06 11:53:08,608 INFO [trainer.py:765] (7/8) Epoch 23, batch 1100, train_loss[loss=3.454, NarTop10Accuracy=0.6332, over 6769.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6376, over 5978.02 frames. ], batch size: 17, lr: 3.72e-03 +2024-08-06 11:53:40,339 INFO [trainer.py:765] (7/8) Epoch 23, batch 1200, train_loss[loss=3.572, NarTop10Accuracy=0.6108, over 7374.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6361, over 5959.58 frames. ], batch size: 31, lr: 3.72e-03 +2024-08-06 11:53:42,824 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 11:53:53,935 INFO [trainer.py:811] (7/8) Epoch 23, validation: loss=3.236, NarTop10Accuracy=0.6739, over 1907754.00 frames. +2024-08-06 11:53:53,935 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 11:53:54,457 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.901e+02 2.047e+02 2.234e+02 4.368e+02, threshold=4.093e+02, percent-clipped=0.1 +2024-08-06 11:54:30,447 INFO [trainer.py:765] (7/8) Epoch 23, batch 1300, train_loss[loss=3.205, NarTop10Accuracy=0.6724, over 5080.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6354, over 6018.97 frames. ], batch size: 6, lr: 3.72e-03 +2024-08-06 11:55:04,197 INFO [trainer.py:765] (7/8) Epoch 23, batch 1400, train_loss[loss=3.465, NarTop10Accuracy=0.6339, over 6204.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 6038.52 frames. ], batch size: 11, lr: 3.71e-03 +2024-08-06 11:55:35,398 INFO [trainer.py:765] (7/8) Epoch 23, batch 1500, train_loss[loss=3.51, NarTop10Accuracy=0.6192, over 6299.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6336, over 5974.06 frames. ], batch size: 49, lr: 3.71e-03 +2024-08-06 11:56:03,427 INFO [trainer.py:765] (7/8) Epoch 23, batch 1600, train_loss[loss=3.305, NarTop10Accuracy=0.6564, over 7281.00 frames. ], tot_loss[loss=3.416, NarTop10Accuracy=0.6342, over 5962.33 frames. ], batch size: 22, lr: 3.71e-03 +2024-08-06 11:56:30,201 INFO [trainer.py:765] (7/8) Epoch 23, batch 1700, train_loss[loss=3.565, NarTop10Accuracy=0.5939, over 6347.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6309, over 5943.00 frames. ], batch size: 13, lr: 3.70e-03 +2024-08-06 11:56:56,968 INFO [trainer.py:765] (7/8) Epoch 23, batch 1800, train_loss[loss=3.305, NarTop10Accuracy=0.6582, over 7153.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6349, over 5989.58 frames. ], batch size: 22, lr: 3.70e-03 +2024-08-06 11:57:23,596 INFO [trainer.py:765] (7/8) Epoch 23, batch 1900, train_loss[loss=3.484, NarTop10Accuracy=0.6283, over 6012.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6328, over 6035.97 frames. ], batch size: 49, lr: 3.70e-03 +2024-08-06 11:57:49,251 INFO [trainer.py:765] (7/8) Epoch 23, batch 2000, train_loss[loss=3.618, NarTop10Accuracy=0.594, over 6022.00 frames. ], tot_loss[loss=3.429, NarTop10Accuracy=0.6321, over 6014.49 frames. ], batch size: 50, lr: 3.69e-03 +2024-08-06 11:58:14,769 INFO [trainer.py:765] (7/8) Epoch 23, batch 2100, train_loss[loss=3.41, NarTop10Accuracy=0.6384, over 3986.00 frames. ], tot_loss[loss=3.433, NarTop10Accuracy=0.6309, over 5994.48 frames. 
], batch size: 4, lr: 3.69e-03 +2024-08-06 11:58:40,237 INFO [trainer.py:765] (7/8) Epoch 23, batch 2200, train_loss[loss=3.586, NarTop10Accuracy=0.6087, over 7282.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6319, over 6024.91 frames. ], batch size: 30, lr: 3.69e-03 +2024-08-06 11:59:08,915 INFO [trainer.py:765] (7/8) Epoch 23, batch 2300, train_loss[loss=3.075, NarTop10Accuracy=0.7, over 5722.00 frames. ], tot_loss[loss=3.446, NarTop10Accuracy=0.6287, over 6055.76 frames. ], batch size: 9, lr: 3.68e-03 +2024-08-06 11:59:33,601 INFO [trainer.py:765] (7/8) Epoch 23, batch 2400, train_loss[loss=3.097, NarTop10Accuracy=0.6956, over 5100.00 frames. ], tot_loss[loss=3.456, NarTop10Accuracy=0.6263, over 5885.00 frames. ], batch size: 7, lr: 3.68e-03 +2024-08-06 11:59:57,010 INFO [trainer.py:765] (7/8) Epoch 23, batch 2500, train_loss[loss=3.37, NarTop10Accuracy=0.6466, over 5082.00 frames. ], tot_loss[loss=3.424, NarTop10Accuracy=0.6323, over 5538.09 frames. ], batch size: 6, lr: 3.68e-03 +2024-08-06 12:00:18,140 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:01:22,110 INFO [trainer.py:765] (7/8) Epoch 24, batch 100, train_loss[loss=3.677, NarTop10Accuracy=0.5868, over 7336.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6429, over 2352.28 frames. ], batch size: 31, lr: 3.59e-03 +2024-08-06 12:01:51,340 INFO [trainer.py:765] (7/8) Epoch 24, batch 200, train_loss[loss=3.603, NarTop10Accuracy=0.5981, over 6916.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6475, over 3853.17 frames. ], batch size: 17, lr: 3.59e-03 +2024-08-06 12:02:23,511 INFO [trainer.py:765] (7/8) Epoch 24, batch 300, train_loss[loss=3.278, NarTop10Accuracy=0.6627, over 7144.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6484, over 4668.93 frames. ], batch size: 22, lr: 3.59e-03 +2024-08-06 12:03:02,846 INFO [trainer.py:765] (7/8) Epoch 24, batch 400, train_loss[loss=3.274, NarTop10Accuracy=0.6797, over 5053.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6481, over 5128.12 frames. ], batch size: 7, lr: 3.59e-03 +2024-08-06 12:03:31,255 INFO [trainer.py:765] (7/8) Epoch 24, batch 500, train_loss[loss=3.121, NarTop10Accuracy=0.6934, over 6014.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6466, over 5416.74 frames. ], batch size: 11, lr: 3.58e-03 +2024-08-06 12:04:00,171 INFO [trainer.py:765] (7/8) Epoch 24, batch 600, train_loss[loss=3.418, NarTop10Accuracy=0.6297, over 5828.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6464, over 5684.54 frames. ], batch size: 9, lr: 3.58e-03 +2024-08-06 12:04:12,529 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:04:22,775 INFO [trainer.py:811] (7/8) Epoch 24, validation: loss=3.282, NarTop10Accuracy=0.6644, over 1907754.00 frames. +2024-08-06 12:04:22,776 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:04:23,311 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 1.905e+02 2.071e+02 2.258e+02 3.709e+02, threshold=4.142e+02, percent-clipped=0.0 +2024-08-06 12:04:51,733 INFO [trainer.py:765] (7/8) Epoch 24, batch 700, train_loss[loss=3.367, NarTop10Accuracy=0.6625, over 4912.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6454, over 5748.60 frames. ], batch size: 6, lr: 3.58e-03 +2024-08-06 12:05:21,274 INFO [trainer.py:765] (7/8) Epoch 24, batch 800, train_loss[loss=3.538, NarTop10Accuracy=0.5953, over 5039.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6426, over 5810.75 frames. 
], batch size: 6, lr: 3.57e-03 +2024-08-06 12:05:51,754 INFO [trainer.py:765] (7/8) Epoch 24, batch 900, train_loss[loss=3.48, NarTop10Accuracy=0.6221, over 6338.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6421, over 5824.75 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:06:32,813 INFO [trainer.py:765] (7/8) Epoch 24, batch 1000, train_loss[loss=3.338, NarTop10Accuracy=0.6521, over 6229.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6398, over 5924.41 frames. ], batch size: 13, lr: 3.57e-03 +2024-08-06 12:07:09,040 INFO [trainer.py:765] (7/8) Epoch 24, batch 1100, train_loss[loss=3.403, NarTop10Accuracy=0.6387, over 6947.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5961.18 frames. ], batch size: 17, lr: 3.56e-03 +2024-08-06 12:07:38,137 INFO [trainer.py:765] (7/8) Epoch 24, batch 1200, train_loss[loss=3.37, NarTop10Accuracy=0.6471, over 7183.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6373, over 5947.90 frames. ], batch size: 30, lr: 3.56e-03 +2024-08-06 12:08:20,731 INFO [trainer.py:765] (7/8) Epoch 24, batch 1300, train_loss[loss=3.217, NarTop10Accuracy=0.6633, over 5134.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6363, over 6022.07 frames. ], batch size: 6, lr: 3.56e-03 +2024-08-06 12:08:56,065 INFO [trainer.py:765] (7/8) Epoch 24, batch 1400, train_loss[loss=3.539, NarTop10Accuracy=0.6188, over 6009.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6342, over 6055.82 frames. ], batch size: 11, lr: 3.56e-03 +2024-08-06 12:09:24,339 INFO [trainer.py:765] (7/8) Epoch 24, batch 1500, train_loss[loss=3.65, NarTop10Accuracy=0.5906, over 5946.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6341, over 5984.16 frames. ], batch size: 48, lr: 3.55e-03 +2024-08-06 12:09:52,527 INFO [trainer.py:765] (7/8) Epoch 24, batch 1600, train_loss[loss=3.324, NarTop10Accuracy=0.6492, over 7214.00 frames. ], tot_loss[loss=3.412, NarTop10Accuracy=0.6352, over 5953.22 frames. ], batch size: 22, lr: 3.55e-03 +2024-08-06 12:10:22,546 INFO [trainer.py:765] (7/8) Epoch 24, batch 1700, train_loss[loss=3.534, NarTop10Accuracy=0.6225, over 6786.00 frames. ], tot_loss[loss=3.419, NarTop10Accuracy=0.6339, over 5948.54 frames. ], batch size: 14, lr: 3.55e-03 +2024-08-06 12:10:49,273 INFO [trainer.py:765] (7/8) Epoch 24, batch 1800, train_loss[loss=3.287, NarTop10Accuracy=0.66, over 7017.00 frames. ], tot_loss[loss=3.425, NarTop10Accuracy=0.6327, over 6019.30 frames. ], batch size: 22, lr: 3.54e-03 +2024-08-06 12:11:15,847 INFO [trainer.py:765] (7/8) Epoch 24, batch 1900, train_loss[loss=3.406, NarTop10Accuracy=0.6436, over 6096.00 frames. ], tot_loss[loss=3.423, NarTop10Accuracy=0.6329, over 6045.45 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:11:41,666 INFO [trainer.py:765] (7/8) Epoch 24, batch 2000, train_loss[loss=3.442, NarTop10Accuracy=0.6326, over 6075.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6336, over 6042.79 frames. ], batch size: 49, lr: 3.54e-03 +2024-08-06 12:12:07,104 INFO [trainer.py:765] (7/8) Epoch 24, batch 2100, train_loss[loss=3.115, NarTop10Accuracy=0.6769, over 4743.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6366, over 6021.65 frames. ], batch size: 5, lr: 3.54e-03 +2024-08-06 12:12:33,374 INFO [trainer.py:765] (7/8) Epoch 24, batch 2200, train_loss[loss=3.471, NarTop10Accuracy=0.6253, over 7350.00 frames. ], tot_loss[loss=3.417, NarTop10Accuracy=0.6346, over 6059.83 frames. 
], batch size: 31, lr: 3.53e-03 +2024-08-06 12:12:58,772 INFO [trainer.py:765] (7/8) Epoch 24, batch 2300, train_loss[loss=3.672, NarTop10Accuracy=0.5857, over 5706.00 frames. ], tot_loss[loss=3.431, NarTop10Accuracy=0.6319, over 6065.47 frames. ], batch size: 9, lr: 3.53e-03 +2024-08-06 12:13:23,487 INFO [trainer.py:765] (7/8) Epoch 24, batch 2400, train_loss[loss=3.468, NarTop10Accuracy=0.6237, over 5055.00 frames. ], tot_loss[loss=3.436, NarTop10Accuracy=0.6307, over 5883.21 frames. ], batch size: 7, lr: 3.53e-03 +2024-08-06 12:13:47,005 INFO [trainer.py:765] (7/8) Epoch 24, batch 2500, train_loss[loss=3.287, NarTop10Accuracy=0.6723, over 4865.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6366, over 5522.55 frames. ], batch size: 6, lr: 3.52e-03 +2024-08-06 12:14:08,318 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:14:50,195 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:15:00,657 INFO [trainer.py:811] (7/8) Epoch 25, validation: loss=3.279, NarTop10Accuracy=0.6656, over 1907754.00 frames. +2024-08-06 12:15:00,658 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:15:01,363 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.921e+02 2.068e+02 2.276e+02 6.228e+02, threshold=4.136e+02, percent-clipped=0.3 +2024-08-06 12:15:17,917 INFO [trainer.py:765] (7/8) Epoch 25, batch 100, train_loss[loss=3.22, NarTop10Accuracy=0.6704, over 7182.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6501, over 2359.13 frames. ], batch size: 30, lr: 3.45e-03 +2024-08-06 12:15:53,499 INFO [trainer.py:765] (7/8) Epoch 25, batch 200, train_loss[loss=3.395, NarTop10Accuracy=0.6479, over 6841.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6477, over 3866.19 frames. ], batch size: 17, lr: 3.44e-03 +2024-08-06 12:16:23,595 INFO [trainer.py:765] (7/8) Epoch 25, batch 300, train_loss[loss=3.515, NarTop10Accuracy=0.6208, over 7094.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6465, over 4660.65 frames. ], batch size: 22, lr: 3.44e-03 +2024-08-06 12:16:59,162 INFO [trainer.py:765] (7/8) Epoch 25, batch 400, train_loss[loss=3.469, NarTop10Accuracy=0.6197, over 5064.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6455, over 5129.86 frames. ], batch size: 7, lr: 3.44e-03 +2024-08-06 12:17:32,096 INFO [trainer.py:765] (7/8) Epoch 25, batch 500, train_loss[loss=3.194, NarTop10Accuracy=0.6705, over 6154.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6474, over 5409.50 frames. ], batch size: 11, lr: 3.44e-03 +2024-08-06 12:18:05,181 INFO [trainer.py:765] (7/8) Epoch 25, batch 600, train_loss[loss=3.106, NarTop10Accuracy=0.6953, over 5840.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6472, over 5671.42 frames. ], batch size: 9, lr: 3.43e-03 +2024-08-06 12:18:39,597 INFO [trainer.py:765] (7/8) Epoch 25, batch 700, train_loss[loss=3.082, NarTop10Accuracy=0.6963, over 5186.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6459, over 5734.15 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:16,014 INFO [trainer.py:765] (7/8) Epoch 25, batch 800, train_loss[loss=3.332, NarTop10Accuracy=0.6573, over 5053.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6429, over 5804.48 frames. ], batch size: 6, lr: 3.43e-03 +2024-08-06 12:19:49,557 INFO [trainer.py:765] (7/8) Epoch 25, batch 900, train_loss[loss=3.216, NarTop10Accuracy=0.6744, over 6186.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6426, over 5827.80 frames. 
], batch size: 13, lr: 3.43e-03 +2024-08-06 12:20:23,875 INFO [trainer.py:765] (7/8) Epoch 25, batch 1000, train_loss[loss=3.342, NarTop10Accuracy=0.6429, over 6712.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6423, over 5925.86 frames. ], batch size: 14, lr: 3.42e-03 +2024-08-06 12:21:01,915 INFO [trainer.py:765] (7/8) Epoch 25, batch 1100, train_loss[loss=3.196, NarTop10Accuracy=0.668, over 6761.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6378, over 5950.79 frames. ], batch size: 17, lr: 3.42e-03 +2024-08-06 12:21:40,637 INFO [trainer.py:765] (7/8) Epoch 25, batch 1200, train_loss[loss=3.471, NarTop10Accuracy=0.6244, over 7121.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6396, over 5943.03 frames. ], batch size: 30, lr: 3.42e-03 +2024-08-06 12:22:11,837 INFO [trainer.py:765] (7/8) Epoch 25, batch 1300, train_loss[loss=3.326, NarTop10Accuracy=0.6411, over 5008.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.64, over 6017.72 frames. ], batch size: 6, lr: 3.41e-03 +2024-08-06 12:22:48,550 INFO [trainer.py:765] (7/8) Epoch 25, batch 1400, train_loss[loss=3.564, NarTop10Accuracy=0.6004, over 6104.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6385, over 6031.18 frames. ], batch size: 11, lr: 3.41e-03 +2024-08-06 12:23:21,655 INFO [trainer.py:765] (7/8) Epoch 25, batch 1500, train_loss[loss=3.741, NarTop10Accuracy=0.5653, over 6024.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.637, over 5975.34 frames. ], batch size: 48, lr: 3.41e-03 +2024-08-06 12:23:49,717 INFO [trainer.py:765] (7/8) Epoch 25, batch 1600, train_loss[loss=3.348, NarTop10Accuracy=0.6603, over 7238.00 frames. ], tot_loss[loss=3.404, NarTop10Accuracy=0.6373, over 5950.45 frames. ], batch size: 22, lr: 3.41e-03 +2024-08-06 12:24:16,372 INFO [trainer.py:765] (7/8) Epoch 25, batch 1700, train_loss[loss=3.697, NarTop10Accuracy=0.5837, over 6278.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6352, over 5923.56 frames. ], batch size: 13, lr: 3.40e-03 +2024-08-06 12:24:43,092 INFO [trainer.py:765] (7/8) Epoch 25, batch 1800, train_loss[loss=3.551, NarTop10Accuracy=0.6066, over 7160.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6344, over 5993.23 frames. ], batch size: 22, lr: 3.40e-03 +2024-08-06 12:25:09,776 INFO [trainer.py:765] (7/8) Epoch 25, batch 1900, train_loss[loss=3.524, NarTop10Accuracy=0.6129, over 6584.00 frames. ], tot_loss[loss=3.439, NarTop10Accuracy=0.63, over 6035.29 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:35,710 INFO [trainer.py:765] (7/8) Epoch 25, batch 2000, train_loss[loss=3.65, NarTop10Accuracy=0.5914, over 6518.00 frames. ], tot_loss[loss=3.426, NarTop10Accuracy=0.633, over 6015.83 frames. ], batch size: 49, lr: 3.40e-03 +2024-08-06 12:25:47,854 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:25:58,846 INFO [trainer.py:811] (7/8) Epoch 25, validation: loss=3.265, NarTop10Accuracy=0.667, over 1907754.00 frames. +2024-08-06 12:25:58,847 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:25:59,344 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.947e+02 2.092e+02 2.280e+02 8.190e+02, threshold=4.185e+02, percent-clipped=0.2 +2024-08-06 12:26:12,224 INFO [trainer.py:765] (7/8) Epoch 25, batch 2100, train_loss[loss=3.36, NarTop10Accuracy=0.6373, over 4720.00 frames. ], tot_loss[loss=3.418, NarTop10Accuracy=0.6344, over 5995.15 frames. 
], batch size: 5, lr: 3.39e-03 +2024-08-06 12:26:37,833 INFO [trainer.py:765] (7/8) Epoch 25, batch 2200, train_loss[loss=3.524, NarTop10Accuracy=0.6184, over 7318.00 frames. ], tot_loss[loss=3.42, NarTop10Accuracy=0.6342, over 6041.11 frames. ], batch size: 31, lr: 3.39e-03 +2024-08-06 12:27:03,344 INFO [trainer.py:765] (7/8) Epoch 25, batch 2300, train_loss[loss=3.629, NarTop10Accuracy=0.5936, over 5730.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6338, over 6074.73 frames. ], batch size: 9, lr: 3.39e-03 +2024-08-06 12:27:28,150 INFO [trainer.py:765] (7/8) Epoch 25, batch 2400, train_loss[loss=3.435, NarTop10Accuracy=0.6373, over 5162.00 frames. ], tot_loss[loss=3.421, NarTop10Accuracy=0.6339, over 5862.87 frames. ], batch size: 7, lr: 3.39e-03 +2024-08-06 12:27:51,732 INFO [trainer.py:765] (7/8) Epoch 25, batch 2500, train_loss[loss=3.473, NarTop10Accuracy=0.6236, over 5056.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6393, over 5520.58 frames. ], batch size: 6, lr: 3.38e-03 +2024-08-06 12:28:12,973 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:29:08,881 INFO [trainer.py:765] (7/8) Epoch 26, batch 100, train_loss[loss=3.488, NarTop10Accuracy=0.6097, over 7321.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.649, over 2385.48 frames. ], batch size: 31, lr: 3.31e-03 +2024-08-06 12:29:44,318 INFO [trainer.py:765] (7/8) Epoch 26, batch 200, train_loss[loss=3.207, NarTop10Accuracy=0.6742, over 6829.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6482, over 3869.77 frames. ], batch size: 17, lr: 3.31e-03 +2024-08-06 12:30:19,754 INFO [trainer.py:765] (7/8) Epoch 26, batch 300, train_loss[loss=3.328, NarTop10Accuracy=0.6581, over 7092.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6452, over 4663.66 frames. ], batch size: 22, lr: 3.31e-03 +2024-08-06 12:30:52,510 INFO [trainer.py:765] (7/8) Epoch 26, batch 400, train_loss[loss=3.108, NarTop10Accuracy=0.699, over 5199.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6475, over 5111.25 frames. ], batch size: 7, lr: 3.30e-03 +2024-08-06 12:31:26,531 INFO [trainer.py:765] (7/8) Epoch 26, batch 500, train_loss[loss=3.252, NarTop10Accuracy=0.6587, over 6199.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6494, over 5406.67 frames. ], batch size: 11, lr: 3.30e-03 +2024-08-06 12:31:59,782 INFO [trainer.py:765] (7/8) Epoch 26, batch 600, train_loss[loss=3.612, NarTop10Accuracy=0.5969, over 5781.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6471, over 5668.97 frames. ], batch size: 9, lr: 3.30e-03 +2024-08-06 12:32:36,966 INFO [trainer.py:765] (7/8) Epoch 26, batch 700, train_loss[loss=3.262, NarTop10Accuracy=0.6579, over 5121.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6451, over 5742.32 frames. ], batch size: 6, lr: 3.30e-03 +2024-08-06 12:33:10,808 INFO [trainer.py:765] (7/8) Epoch 26, batch 800, train_loss[loss=3.363, NarTop10Accuracy=0.6467, over 5047.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6434, over 5801.87 frames. ], batch size: 6, lr: 3.29e-03 +2024-08-06 12:33:46,257 INFO [trainer.py:765] (7/8) Epoch 26, batch 900, train_loss[loss=3.47, NarTop10Accuracy=0.6217, over 6335.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6433, over 5821.39 frames. ], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:22,902 INFO [trainer.py:765] (7/8) Epoch 26, batch 1000, train_loss[loss=3.346, NarTop10Accuracy=0.6523, over 6301.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.6408, over 5913.71 frames. 
], batch size: 13, lr: 3.29e-03 +2024-08-06 12:34:57,797 INFO [trainer.py:765] (7/8) Epoch 26, batch 1100, train_loss[loss=3.302, NarTop10Accuracy=0.6586, over 6764.00 frames. ], tot_loss[loss=3.388, NarTop10Accuracy=0.6401, over 5932.41 frames. ], batch size: 17, lr: 3.29e-03 +2024-08-06 12:35:31,893 INFO [trainer.py:765] (7/8) Epoch 26, batch 1200, train_loss[loss=3.318, NarTop10Accuracy=0.6592, over 7249.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 5928.68 frames. ], batch size: 30, lr: 3.28e-03 +2024-08-06 12:36:10,657 INFO [trainer.py:765] (7/8) Epoch 26, batch 1300, train_loss[loss=3.395, NarTop10Accuracy=0.6379, over 5081.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.6415, over 6004.84 frames. ], batch size: 6, lr: 3.28e-03 +2024-08-06 12:36:44,564 INFO [trainer.py:765] (7/8) Epoch 26, batch 1400, train_loss[loss=3.366, NarTop10Accuracy=0.6536, over 6096.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6394, over 6024.10 frames. ], batch size: 11, lr: 3.28e-03 +2024-08-06 12:37:03,592 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:37:13,567 INFO [trainer.py:811] (7/8) Epoch 26, validation: loss=3.231, NarTop10Accuracy=0.6753, over 1907754.00 frames. +2024-08-06 12:37:13,568 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:37:14,077 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 1.928e+02 2.102e+02 2.299e+02 4.602e+02, threshold=4.203e+02, percent-clipped=0.2 +2024-08-06 12:37:23,028 INFO [trainer.py:765] (7/8) Epoch 26, batch 1500, train_loss[loss=3.621, NarTop10Accuracy=0.5971, over 5957.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6387, over 5949.14 frames. ], batch size: 49, lr: 3.28e-03 +2024-08-06 12:37:51,060 INFO [trainer.py:765] (7/8) Epoch 26, batch 1600, train_loss[loss=3.433, NarTop10Accuracy=0.6385, over 6972.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6402, over 5935.09 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:38:17,853 INFO [trainer.py:765] (7/8) Epoch 26, batch 1700, train_loss[loss=3.568, NarTop10Accuracy=0.609, over 6266.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6369, over 5943.12 frames. ], batch size: 13, lr: 3.27e-03 +2024-08-06 12:38:44,384 INFO [trainer.py:765] (7/8) Epoch 26, batch 1800, train_loss[loss=3.327, NarTop10Accuracy=0.6484, over 7211.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6372, over 6003.62 frames. ], batch size: 22, lr: 3.27e-03 +2024-08-06 12:39:10,952 INFO [trainer.py:765] (7/8) Epoch 26, batch 1900, train_loss[loss=3.685, NarTop10Accuracy=0.5884, over 6226.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6362, over 6040.83 frames. ], batch size: 49, lr: 3.27e-03 +2024-08-06 12:39:36,610 INFO [trainer.py:765] (7/8) Epoch 26, batch 2000, train_loss[loss=3.409, NarTop10Accuracy=0.6449, over 6520.00 frames. ], tot_loss[loss=3.415, NarTop10Accuracy=0.6353, over 6010.80 frames. ], batch size: 48, lr: 3.26e-03 +2024-08-06 12:40:02,148 INFO [trainer.py:765] (7/8) Epoch 26, batch 2100, train_loss[loss=3.43, NarTop10Accuracy=0.6321, over 3942.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6382, over 6002.73 frames. ], batch size: 4, lr: 3.26e-03 +2024-08-06 12:40:27,758 INFO [trainer.py:765] (7/8) Epoch 26, batch 2200, train_loss[loss=3.305, NarTop10Accuracy=0.6585, over 7111.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.638, over 6040.04 frames. 
], batch size: 30, lr: 3.26e-03 +2024-08-06 12:40:53,232 INFO [trainer.py:765] (7/8) Epoch 26, batch 2300, train_loss[loss=3.253, NarTop10Accuracy=0.6722, over 5670.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6366, over 6065.75 frames. ], batch size: 9, lr: 3.26e-03 +2024-08-06 12:41:17,930 INFO [trainer.py:765] (7/8) Epoch 26, batch 2400, train_loss[loss=3.072, NarTop10Accuracy=0.7006, over 5059.00 frames. ], tot_loss[loss=3.413, NarTop10Accuracy=0.6356, over 5871.42 frames. ], batch size: 7, lr: 3.25e-03 +2024-08-06 12:41:44,477 INFO [trainer.py:765] (7/8) Epoch 26, batch 2500, train_loss[loss=3.42, NarTop10Accuracy=0.6369, over 4262.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6388, over 5542.98 frames. ], batch size: 5, lr: 3.25e-03 +2024-08-06 12:42:05,598 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:43:12,532 INFO [trainer.py:765] (7/8) Epoch 27, batch 100, train_loss[loss=3.666, NarTop10Accuracy=0.5907, over 7494.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6479, over 2357.62 frames. ], batch size: 30, lr: 3.19e-03 +2024-08-06 12:43:43,574 INFO [trainer.py:765] (7/8) Epoch 27, batch 200, train_loss[loss=3.55, NarTop10Accuracy=0.6082, over 6769.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6544, over 3859.76 frames. ], batch size: 17, lr: 3.18e-03 +2024-08-06 12:44:13,785 INFO [trainer.py:765] (7/8) Epoch 27, batch 300, train_loss[loss=3.22, NarTop10Accuracy=0.6726, over 7068.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6531, over 4674.70 frames. ], batch size: 22, lr: 3.18e-03 +2024-08-06 12:44:50,460 INFO [trainer.py:765] (7/8) Epoch 27, batch 400, train_loss[loss=2.982, NarTop10Accuracy=0.7071, over 5231.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6532, over 5120.31 frames. ], batch size: 7, lr: 3.18e-03 +2024-08-06 12:45:20,669 INFO [trainer.py:765] (7/8) Epoch 27, batch 500, train_loss[loss=3.291, NarTop10Accuracy=0.6607, over 6227.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6537, over 5393.52 frames. ], batch size: 11, lr: 3.18e-03 +2024-08-06 12:45:55,259 INFO [trainer.py:765] (7/8) Epoch 27, batch 600, train_loss[loss=3.365, NarTop10Accuracy=0.6534, over 5866.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6507, over 5668.82 frames. ], batch size: 9, lr: 3.17e-03 +2024-08-06 12:46:26,746 INFO [trainer.py:765] (7/8) Epoch 27, batch 700, train_loss[loss=3.658, NarTop10Accuracy=0.5897, over 4969.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6505, over 5736.37 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:05,015 INFO [trainer.py:765] (7/8) Epoch 27, batch 800, train_loss[loss=3.34, NarTop10Accuracy=0.6569, over 5092.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 5777.90 frames. ], batch size: 6, lr: 3.17e-03 +2024-08-06 12:47:32,740 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:47:42,765 INFO [trainer.py:811] (7/8) Epoch 27, validation: loss=3.258, NarTop10Accuracy=0.6695, over 1907754.00 frames. +2024-08-06 12:47:42,766 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:47:43,336 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.939e+02 2.100e+02 2.298e+02 4.859e+02, threshold=4.201e+02, percent-clipped=0.2 +2024-08-06 12:47:47,259 INFO [trainer.py:765] (7/8) Epoch 27, batch 900, train_loss[loss=3.314, NarTop10Accuracy=0.6444, over 6331.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6464, over 5808.80 frames. 
], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:22,862 INFO [trainer.py:765] (7/8) Epoch 27, batch 1000, train_loss[loss=3.393, NarTop10Accuracy=0.6388, over 6295.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 5915.41 frames. ], batch size: 13, lr: 3.17e-03 +2024-08-06 12:48:58,084 INFO [trainer.py:765] (7/8) Epoch 27, batch 1100, train_loss[loss=3.499, NarTop10Accuracy=0.6208, over 6783.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6419, over 5938.19 frames. ], batch size: 17, lr: 3.16e-03 +2024-08-06 12:49:34,896 INFO [trainer.py:765] (7/8) Epoch 27, batch 1200, train_loss[loss=3.204, NarTop10Accuracy=0.679, over 7265.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6421, over 5930.30 frames. ], batch size: 31, lr: 3.16e-03 +2024-08-06 12:50:06,242 INFO [trainer.py:765] (7/8) Epoch 27, batch 1300, train_loss[loss=2.923, NarTop10Accuracy=0.7229, over 5117.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.643, over 6010.24 frames. ], batch size: 6, lr: 3.16e-03 +2024-08-06 12:50:42,950 INFO [trainer.py:765] (7/8) Epoch 27, batch 1400, train_loss[loss=3.251, NarTop10Accuracy=0.6632, over 6210.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 6029.58 frames. ], batch size: 11, lr: 3.16e-03 +2024-08-06 12:51:11,278 INFO [trainer.py:765] (7/8) Epoch 27, batch 1500, train_loss[loss=3.474, NarTop10Accuracy=0.6301, over 6318.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6412, over 5966.51 frames. ], batch size: 49, lr: 3.15e-03 +2024-08-06 12:51:39,352 INFO [trainer.py:765] (7/8) Epoch 27, batch 1600, train_loss[loss=3.443, NarTop10Accuracy=0.6315, over 7638.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6382, over 5951.14 frames. ], batch size: 23, lr: 3.15e-03 +2024-08-06 12:52:06,061 INFO [trainer.py:765] (7/8) Epoch 27, batch 1700, train_loss[loss=3.568, NarTop10Accuracy=0.6131, over 6310.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6366, over 5937.36 frames. ], batch size: 13, lr: 3.15e-03 +2024-08-06 12:52:32,669 INFO [trainer.py:765] (7/8) Epoch 27, batch 1800, train_loss[loss=3.281, NarTop10Accuracy=0.6573, over 7165.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6388, over 6005.55 frames. ], batch size: 22, lr: 3.15e-03 +2024-08-06 12:53:02,292 INFO [trainer.py:765] (7/8) Epoch 27, batch 1900, train_loss[loss=3.752, NarTop10Accuracy=0.5686, over 6766.00 frames. ], tot_loss[loss=3.406, NarTop10Accuracy=0.6365, over 6044.85 frames. ], batch size: 51, lr: 3.14e-03 +2024-08-06 12:53:27,998 INFO [trainer.py:765] (7/8) Epoch 27, batch 2000, train_loss[loss=3.494, NarTop10Accuracy=0.6198, over 6066.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6388, over 6035.36 frames. ], batch size: 49, lr: 3.14e-03 +2024-08-06 12:53:53,538 INFO [trainer.py:765] (7/8) Epoch 27, batch 2100, train_loss[loss=3.528, NarTop10Accuracy=0.6072, over 3886.00 frames. ], tot_loss[loss=3.394, NarTop10Accuracy=0.6388, over 6015.88 frames. ], batch size: 4, lr: 3.14e-03 +2024-08-06 12:54:18,997 INFO [trainer.py:765] (7/8) Epoch 27, batch 2200, train_loss[loss=3.329, NarTop10Accuracy=0.6593, over 7233.00 frames. ], tot_loss[loss=3.395, NarTop10Accuracy=0.6387, over 6040.30 frames. ], batch size: 31, lr: 3.14e-03 +2024-08-06 12:54:44,480 INFO [trainer.py:765] (7/8) Epoch 27, batch 2300, train_loss[loss=3.125, NarTop10Accuracy=0.6956, over 5751.00 frames. ], tot_loss[loss=3.411, NarTop10Accuracy=0.6357, over 6058.30 frames. 
], batch size: 9, lr: 3.14e-03 +2024-08-06 12:55:09,218 INFO [trainer.py:765] (7/8) Epoch 27, batch 2400, train_loss[loss=3.555, NarTop10Accuracy=0.612, over 5181.00 frames. ], tot_loss[loss=3.422, NarTop10Accuracy=0.6333, over 5884.25 frames. ], batch size: 7, lr: 3.13e-03 +2024-08-06 12:55:32,727 INFO [trainer.py:765] (7/8) Epoch 27, batch 2500, train_loss[loss=3.336, NarTop10Accuracy=0.646, over 5025.00 frames. ], tot_loss[loss=3.391, NarTop10Accuracy=0.6389, over 5548.62 frames. ], batch size: 6, lr: 3.13e-03 +2024-08-06 12:55:54,030 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 12:56:46,803 INFO [trainer.py:765] (7/8) Epoch 28, batch 100, train_loss[loss=3.182, NarTop10Accuracy=0.6927, over 7338.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6558, over 2387.98 frames. ], batch size: 30, lr: 3.07e-03 +2024-08-06 12:57:23,204 INFO [trainer.py:765] (7/8) Epoch 28, batch 200, train_loss[loss=3.336, NarTop10Accuracy=0.6489, over 6799.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.652, over 3877.51 frames. ], batch size: 17, lr: 3.07e-03 +2024-08-06 12:57:55,704 INFO [trainer.py:765] (7/8) Epoch 28, batch 300, train_loss[loss=3.306, NarTop10Accuracy=0.6563, over 7197.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6486, over 4678.06 frames. ], batch size: 22, lr: 3.07e-03 +2024-08-06 12:57:56,457 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 12:58:06,828 INFO [trainer.py:811] (7/8) Epoch 28, validation: loss=3.275, NarTop10Accuracy=0.6665, over 1907754.00 frames. +2024-08-06 12:58:06,828 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 12:58:07,333 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.944e+02 2.106e+02 2.298e+02 4.786e+02, threshold=4.211e+02, percent-clipped=0.1 +2024-08-06 12:58:34,932 INFO [trainer.py:765] (7/8) Epoch 28, batch 400, train_loss[loss=3.289, NarTop10Accuracy=0.6595, over 5078.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6479, over 5126.55 frames. ], batch size: 7, lr: 3.06e-03 +2024-08-06 12:59:11,437 INFO [trainer.py:765] (7/8) Epoch 28, batch 500, train_loss[loss=3.07, NarTop10Accuracy=0.702, over 6169.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6487, over 5409.38 frames. ], batch size: 11, lr: 3.06e-03 +2024-08-06 12:59:44,487 INFO [trainer.py:765] (7/8) Epoch 28, batch 600, train_loss[loss=3.265, NarTop10Accuracy=0.6644, over 5773.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6479, over 5679.96 frames. ], batch size: 9, lr: 3.06e-03 +2024-08-06 13:00:20,013 INFO [trainer.py:765] (7/8) Epoch 28, batch 700, train_loss[loss=3.241, NarTop10Accuracy=0.6662, over 5110.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6474, over 5737.24 frames. ], batch size: 6, lr: 3.06e-03 +2024-08-06 13:00:56,433 INFO [trainer.py:765] (7/8) Epoch 28, batch 800, train_loss[loss=3.056, NarTop10Accuracy=0.7082, over 5054.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6488, over 5782.52 frames. ], batch size: 6, lr: 3.05e-03 +2024-08-06 13:01:31,042 INFO [trainer.py:765] (7/8) Epoch 28, batch 900, train_loss[loss=3.228, NarTop10Accuracy=0.6859, over 6766.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6465, over 5818.36 frames. ], batch size: 14, lr: 3.05e-03 +2024-08-06 13:02:06,494 INFO [trainer.py:765] (7/8) Epoch 28, batch 1000, train_loss[loss=3.577, NarTop10Accuracy=0.6032, over 6629.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6444, over 5920.04 frames. 
], batch size: 14, lr: 3.05e-03 +2024-08-06 13:02:41,229 INFO [trainer.py:765] (7/8) Epoch 28, batch 1100, train_loss[loss=3.307, NarTop10Accuracy=0.6505, over 6501.00 frames. ], tot_loss[loss=3.374, NarTop10Accuracy=0.6427, over 5944.22 frames. ], batch size: 16, lr: 3.05e-03 +2024-08-06 13:03:16,895 INFO [trainer.py:765] (7/8) Epoch 28, batch 1200, train_loss[loss=3.554, NarTop10Accuracy=0.6073, over 7609.00 frames. ], tot_loss[loss=3.382, NarTop10Accuracy=0.6409, over 5949.35 frames. ], batch size: 32, lr: 3.05e-03 +2024-08-06 13:03:54,153 INFO [trainer.py:765] (7/8) Epoch 28, batch 1300, train_loss[loss=3.071, NarTop10Accuracy=0.6969, over 4943.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6409, over 6024.02 frames. ], batch size: 6, lr: 3.04e-03 +2024-08-06 13:04:28,712 INFO [trainer.py:765] (7/8) Epoch 28, batch 1400, train_loss[loss=3.484, NarTop10Accuracy=0.6197, over 6224.00 frames. ], tot_loss[loss=3.389, NarTop10Accuracy=0.6391, over 6033.41 frames. ], batch size: 11, lr: 3.04e-03 +2024-08-06 13:05:02,348 INFO [trainer.py:765] (7/8) Epoch 28, batch 1500, train_loss[loss=3.433, NarTop10Accuracy=0.6313, over 6183.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6419, over 5981.47 frames. ], batch size: 49, lr: 3.04e-03 +2024-08-06 13:05:30,370 INFO [trainer.py:765] (7/8) Epoch 28, batch 1600, train_loss[loss=3.667, NarTop10Accuracy=0.5811, over 7094.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6396, over 5941.42 frames. ], batch size: 22, lr: 3.04e-03 +2024-08-06 13:05:57,130 INFO [trainer.py:765] (7/8) Epoch 28, batch 1700, train_loss[loss=3.651, NarTop10Accuracy=0.5891, over 6197.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6397, over 5922.15 frames. ], batch size: 13, lr: 3.04e-03 +2024-08-06 13:06:23,732 INFO [trainer.py:765] (7/8) Epoch 28, batch 1800, train_loss[loss=3.659, NarTop10Accuracy=0.5801, over 7076.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6416, over 5999.61 frames. ], batch size: 22, lr: 3.03e-03 +2024-08-06 13:06:50,372 INFO [trainer.py:765] (7/8) Epoch 28, batch 1900, train_loss[loss=3.534, NarTop10Accuracy=0.6157, over 5773.00 frames. ], tot_loss[loss=3.386, NarTop10Accuracy=0.6407, over 6046.81 frames. ], batch size: 50, lr: 3.03e-03 +2024-08-06 13:07:16,115 INFO [trainer.py:765] (7/8) Epoch 28, batch 2000, train_loss[loss=3.518, NarTop10Accuracy=0.6132, over 6477.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6413, over 6008.74 frames. ], batch size: 48, lr: 3.03e-03 +2024-08-06 13:07:41,546 INFO [trainer.py:765] (7/8) Epoch 28, batch 2100, train_loss[loss=3.574, NarTop10Accuracy=0.591, over 3927.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6391, over 5993.70 frames. ], batch size: 4, lr: 3.03e-03 +2024-08-06 13:08:06,931 INFO [trainer.py:765] (7/8) Epoch 28, batch 2200, train_loss[loss=3.748, NarTop10Accuracy=0.5729, over 7305.00 frames. ], tot_loss[loss=3.402, NarTop10Accuracy=0.6374, over 6042.57 frames. ], batch size: 30, lr: 3.02e-03 +2024-08-06 13:08:32,386 INFO [trainer.py:765] (7/8) Epoch 28, batch 2300, train_loss[loss=3.308, NarTop10Accuracy=0.6712, over 5738.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6352, over 6064.41 frames. ], batch size: 9, lr: 3.02e-03 +2024-08-06 13:08:33,134 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:08:43,385 INFO [trainer.py:811] (7/8) Epoch 28, validation: loss=3.224, NarTop10Accuracy=0.676, over 1907754.00 frames. 
+2024-08-06 13:08:43,386 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 13:08:43,890 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 1.997e+02 2.131e+02 2.314e+02 6.875e+02, threshold=4.261e+02, percent-clipped=0.5 +2024-08-06 13:09:07,389 INFO [trainer.py:765] (7/8) Epoch 28, batch 2400, train_loss[loss=3.787, NarTop10Accuracy=0.5625, over 5195.00 frames. ], tot_loss[loss=3.414, NarTop10Accuracy=0.6348, over 5864.99 frames. ], batch size: 7, lr: 3.02e-03 +2024-08-06 13:09:30,781 INFO [trainer.py:765] (7/8) Epoch 28, batch 2500, train_loss[loss=3.473, NarTop10Accuracy=0.6289, over 5042.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6418, over 5517.42 frames. ], batch size: 6, lr: 3.02e-03 +2024-08-06 13:09:52,177 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:10:48,193 INFO [trainer.py:765] (7/8) Epoch 29, batch 100, train_loss[loss=3.692, NarTop10Accuracy=0.5793, over 7403.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6567, over 2364.51 frames. ], batch size: 31, lr: 2.96e-03 +2024-08-06 13:11:20,840 INFO [trainer.py:765] (7/8) Epoch 29, batch 200, train_loss[loss=3.416, NarTop10Accuracy=0.6446, over 6934.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6524, over 3859.14 frames. ], batch size: 17, lr: 2.96e-03 +2024-08-06 13:11:56,949 INFO [trainer.py:765] (7/8) Epoch 29, batch 300, train_loss[loss=3.075, NarTop10Accuracy=0.7027, over 6957.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6525, over 4659.20 frames. ], batch size: 22, lr: 2.96e-03 +2024-08-06 13:12:29,716 INFO [trainer.py:765] (7/8) Epoch 29, batch 400, train_loss[loss=3.025, NarTop10Accuracy=0.7166, over 5095.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6525, over 5119.84 frames. ], batch size: 7, lr: 2.96e-03 +2024-08-06 13:12:59,920 INFO [trainer.py:765] (7/8) Epoch 29, batch 500, train_loss[loss=3.446, NarTop10Accuracy=0.6293, over 6128.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6505, over 5397.22 frames. ], batch size: 11, lr: 2.95e-03 +2024-08-06 13:13:33,546 INFO [trainer.py:765] (7/8) Epoch 29, batch 600, train_loss[loss=3.603, NarTop10Accuracy=0.6029, over 5747.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6476, over 5670.78 frames. ], batch size: 9, lr: 2.95e-03 +2024-08-06 13:14:09,936 INFO [trainer.py:765] (7/8) Epoch 29, batch 700, train_loss[loss=3.516, NarTop10Accuracy=0.6103, over 5063.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6475, over 5750.82 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:14:46,676 INFO [trainer.py:765] (7/8) Epoch 29, batch 800, train_loss[loss=3.349, NarTop10Accuracy=0.6531, over 5184.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6434, over 5822.37 frames. ], batch size: 6, lr: 2.95e-03 +2024-08-06 13:15:17,113 INFO [trainer.py:765] (7/8) Epoch 29, batch 900, train_loss[loss=3.412, NarTop10Accuracy=0.6397, over 6310.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6463, over 5828.12 frames. ], batch size: 13, lr: 2.95e-03 +2024-08-06 13:15:59,363 INFO [trainer.py:765] (7/8) Epoch 29, batch 1000, train_loss[loss=3.512, NarTop10Accuracy=0.61, over 6249.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6457, over 5924.30 frames. ], batch size: 13, lr: 2.94e-03 +2024-08-06 13:16:31,712 INFO [trainer.py:765] (7/8) Epoch 29, batch 1100, train_loss[loss=3.414, NarTop10Accuracy=0.6444, over 6947.00 frames. ], tot_loss[loss=3.378, NarTop10Accuracy=0.6422, over 5949.54 frames. 
], batch size: 17, lr: 2.94e-03 +2024-08-06 13:17:04,932 INFO [trainer.py:765] (7/8) Epoch 29, batch 1200, train_loss[loss=3.409, NarTop10Accuracy=0.6282, over 7066.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6427, over 5950.35 frames. ], batch size: 30, lr: 2.94e-03 +2024-08-06 13:17:43,956 INFO [trainer.py:765] (7/8) Epoch 29, batch 1300, train_loss[loss=3.245, NarTop10Accuracy=0.6708, over 5041.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6429, over 6008.58 frames. ], batch size: 6, lr: 2.94e-03 +2024-08-06 13:18:17,923 INFO [trainer.py:765] (7/8) Epoch 29, batch 1400, train_loss[loss=3.768, NarTop10Accuracy=0.5618, over 6089.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6396, over 6045.19 frames. ], batch size: 11, lr: 2.94e-03 +2024-08-06 13:18:48,306 INFO [trainer.py:765] (7/8) Epoch 29, batch 1500, train_loss[loss=3.767, NarTop10Accuracy=0.5617, over 6361.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 5981.67 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:19:16,408 INFO [trainer.py:765] (7/8) Epoch 29, batch 1600, train_loss[loss=3.183, NarTop10Accuracy=0.6772, over 7440.00 frames. ], tot_loss[loss=3.384, NarTop10Accuracy=0.641, over 5958.80 frames. ], batch size: 23, lr: 2.93e-03 +2024-08-06 13:19:43,242 INFO [trainer.py:765] (7/8) Epoch 29, batch 1700, train_loss[loss=3.303, NarTop10Accuracy=0.6623, over 6568.00 frames. ], tot_loss[loss=3.39, NarTop10Accuracy=0.6401, over 5936.40 frames. ], batch size: 14, lr: 2.93e-03 +2024-08-06 13:19:49,090 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:19:59,386 INFO [trainer.py:811] (7/8) Epoch 29, validation: loss=3.233, NarTop10Accuracy=0.6754, over 1907754.00 frames. +2024-08-06 13:19:59,387 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 13:19:59,903 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.964e+02 2.123e+02 2.299e+02 5.520e+02, threshold=4.246e+02, percent-clipped=0.2 +2024-08-06 13:20:20,107 INFO [trainer.py:765] (7/8) Epoch 29, batch 1800, train_loss[loss=3.518, NarTop10Accuracy=0.6239, over 7220.00 frames. ], tot_loss[loss=3.387, NarTop10Accuracy=0.6405, over 6000.88 frames. ], batch size: 22, lr: 2.93e-03 +2024-08-06 13:20:46,844 INFO [trainer.py:765] (7/8) Epoch 29, batch 1900, train_loss[loss=3.408, NarTop10Accuracy=0.6303, over 6240.00 frames. ], tot_loss[loss=3.403, NarTop10Accuracy=0.6372, over 6024.26 frames. ], batch size: 49, lr: 2.93e-03 +2024-08-06 13:21:12,478 INFO [trainer.py:765] (7/8) Epoch 29, batch 2000, train_loss[loss=3.711, NarTop10Accuracy=0.5784, over 5974.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6383, over 5999.98 frames. ], batch size: 48, lr: 2.92e-03 +2024-08-06 13:21:37,983 INFO [trainer.py:765] (7/8) Epoch 29, batch 2100, train_loss[loss=3.491, NarTop10Accuracy=0.6289, over 3880.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6377, over 5990.18 frames. ], batch size: 4, lr: 2.92e-03 +2024-08-06 13:22:03,359 INFO [trainer.py:765] (7/8) Epoch 29, batch 2200, train_loss[loss=3.327, NarTop10Accuracy=0.6508, over 7314.00 frames. ], tot_loss[loss=3.399, NarTop10Accuracy=0.6381, over 6030.31 frames. ], batch size: 31, lr: 2.92e-03 +2024-08-06 13:22:28,831 INFO [trainer.py:765] (7/8) Epoch 29, batch 2300, train_loss[loss=3.454, NarTop10Accuracy=0.6256, over 5748.00 frames. ], tot_loss[loss=3.405, NarTop10Accuracy=0.6369, over 6060.34 frames. 
], batch size: 9, lr: 2.92e-03 +2024-08-06 13:22:53,620 INFO [trainer.py:765] (7/8) Epoch 29, batch 2400, train_loss[loss=3.155, NarTop10Accuracy=0.6721, over 5274.00 frames. ], tot_loss[loss=3.407, NarTop10Accuracy=0.6359, over 5877.25 frames. ], batch size: 7, lr: 2.92e-03 +2024-08-06 13:23:16,978 INFO [trainer.py:765] (7/8) Epoch 29, batch 2500, train_loss[loss=3.529, NarTop10Accuracy=0.6189, over 5068.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6407, over 5542.47 frames. ], batch size: 6, lr: 2.91e-03 +2024-08-06 13:23:38,436 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:24:38,391 INFO [trainer.py:765] (7/8) Epoch 30, batch 100, train_loss[loss=3.192, NarTop10Accuracy=0.671, over 7130.00 frames. ], tot_loss[loss=3.271, NarTop10Accuracy=0.6649, over 2368.57 frames. ], batch size: 30, lr: 2.86e-03 +2024-08-06 13:25:14,782 INFO [trainer.py:765] (7/8) Epoch 30, batch 200, train_loss[loss=3.179, NarTop10Accuracy=0.6851, over 6833.00 frames. ], tot_loss[loss=3.291, NarTop10Accuracy=0.6607, over 3858.06 frames. ], batch size: 17, lr: 2.86e-03 +2024-08-06 13:25:46,846 INFO [trainer.py:765] (7/8) Epoch 30, batch 300, train_loss[loss=3.068, NarTop10Accuracy=0.6934, over 7181.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6557, over 4666.67 frames. ], batch size: 22, lr: 2.86e-03 +2024-08-06 13:26:17,539 INFO [trainer.py:765] (7/8) Epoch 30, batch 400, train_loss[loss=3.475, NarTop10Accuracy=0.6303, over 5178.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6525, over 5121.98 frames. ], batch size: 7, lr: 2.86e-03 +2024-08-06 13:26:53,920 INFO [trainer.py:765] (7/8) Epoch 30, batch 500, train_loss[loss=3.14, NarTop10Accuracy=0.6945, over 6151.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6556, over 5394.38 frames. ], batch size: 11, lr: 2.85e-03 +2024-08-06 13:27:25,423 INFO [trainer.py:765] (7/8) Epoch 30, batch 600, train_loss[loss=3.175, NarTop10Accuracy=0.6703, over 5811.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6547, over 5656.03 frames. ], batch size: 9, lr: 2.85e-03 +2024-08-06 13:28:00,307 INFO [trainer.py:765] (7/8) Epoch 30, batch 700, train_loss[loss=3.327, NarTop10Accuracy=0.6443, over 4920.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6538, over 5736.19 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:28:37,477 INFO [trainer.py:765] (7/8) Epoch 30, batch 800, train_loss[loss=3.537, NarTop10Accuracy=0.6064, over 5075.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.65, over 5793.90 frames. ], batch size: 6, lr: 2.85e-03 +2024-08-06 13:29:10,425 INFO [trainer.py:765] (7/8) Epoch 30, batch 900, train_loss[loss=3.361, NarTop10Accuracy=0.631, over 6772.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6482, over 5816.86 frames. ], batch size: 14, lr: 2.85e-03 +2024-08-06 13:29:45,914 INFO [trainer.py:765] (7/8) Epoch 30, batch 1000, train_loss[loss=3.476, NarTop10Accuracy=0.6283, over 6745.00 frames. ], tot_loss[loss=3.361, NarTop10Accuracy=0.6461, over 5932.60 frames. ], batch size: 14, lr: 2.84e-03 +2024-08-06 13:30:24,172 INFO [trainer.py:765] (7/8) Epoch 30, batch 1100, train_loss[loss=3.479, NarTop10Accuracy=0.6237, over 6856.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6448, over 5966.93 frames. ], batch size: 17, lr: 2.84e-03 +2024-08-06 13:30:38,002 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:30:48,195 INFO [trainer.py:811] (7/8) Epoch 30, validation: loss=3.239, NarTop10Accuracy=0.6729, over 1907754.00 frames. 
+2024-08-06 13:30:48,196 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 13:30:48,916 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 1.985e+02 2.139e+02 2.326e+02 4.628e+02, threshold=4.279e+02, percent-clipped=0.1 +2024-08-06 13:31:05,665 INFO [trainer.py:765] (7/8) Epoch 30, batch 1200, train_loss[loss=3.334, NarTop10Accuracy=0.6471, over 7282.00 frames. ], tot_loss[loss=3.367, NarTop10Accuracy=0.6443, over 5967.65 frames. ], batch size: 31, lr: 2.84e-03 +2024-08-06 13:31:43,020 INFO [trainer.py:765] (7/8) Epoch 30, batch 1300, train_loss[loss=3.258, NarTop10Accuracy=0.6762, over 5170.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6451, over 6031.41 frames. ], batch size: 6, lr: 2.84e-03 +2024-08-06 13:32:19,325 INFO [trainer.py:765] (7/8) Epoch 30, batch 1400, train_loss[loss=3.743, NarTop10Accuracy=0.5709, over 6041.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6427, over 6057.29 frames. ], batch size: 11, lr: 2.84e-03 +2024-08-06 13:32:52,335 INFO [trainer.py:765] (7/8) Epoch 30, batch 1500, train_loss[loss=3.696, NarTop10Accuracy=0.5888, over 6241.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6416, over 6007.85 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:33:20,408 INFO [trainer.py:765] (7/8) Epoch 30, batch 1600, train_loss[loss=3.607, NarTop10Accuracy=0.59, over 7118.00 frames. ], tot_loss[loss=3.392, NarTop10Accuracy=0.6395, over 5966.15 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:33:47,200 INFO [trainer.py:765] (7/8) Epoch 30, batch 1700, train_loss[loss=3.539, NarTop10Accuracy=0.5992, over 6227.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.639, over 5942.15 frames. ], batch size: 13, lr: 2.83e-03 +2024-08-06 13:34:13,887 INFO [trainer.py:765] (7/8) Epoch 30, batch 1800, train_loss[loss=3.642, NarTop10Accuracy=0.5948, over 7043.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6411, over 6005.65 frames. ], batch size: 22, lr: 2.83e-03 +2024-08-06 13:34:40,547 INFO [trainer.py:765] (7/8) Epoch 30, batch 1900, train_loss[loss=3.651, NarTop10Accuracy=0.5947, over 5756.00 frames. ], tot_loss[loss=3.397, NarTop10Accuracy=0.6383, over 6063.22 frames. ], batch size: 49, lr: 2.83e-03 +2024-08-06 13:35:06,315 INFO [trainer.py:765] (7/8) Epoch 30, batch 2000, train_loss[loss=3.735, NarTop10Accuracy=0.5804, over 6381.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 6038.11 frames. ], batch size: 48, lr: 2.83e-03 +2024-08-06 13:35:31,872 INFO [trainer.py:765] (7/8) Epoch 30, batch 2100, train_loss[loss=3.646, NarTop10Accuracy=0.5909, over 4738.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6416, over 6017.30 frames. ], batch size: 5, lr: 2.82e-03 +2024-08-06 13:36:00,553 INFO [trainer.py:765] (7/8) Epoch 30, batch 2200, train_loss[loss=3.333, NarTop10Accuracy=0.6577, over 7342.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6417, over 6058.59 frames. ], batch size: 30, lr: 2.82e-03 +2024-08-06 13:36:26,029 INFO [trainer.py:765] (7/8) Epoch 30, batch 2300, train_loss[loss=3.437, NarTop10Accuracy=0.6253, over 5832.00 frames. ], tot_loss[loss=3.393, NarTop10Accuracy=0.6399, over 6077.75 frames. ], batch size: 9, lr: 2.82e-03 +2024-08-06 13:36:50,824 INFO [trainer.py:765] (7/8) Epoch 30, batch 2400, train_loss[loss=3.222, NarTop10Accuracy=0.6696, over 5153.00 frames. ], tot_loss[loss=3.401, NarTop10Accuracy=0.6385, over 5882.20 frames. 
], batch size: 7, lr: 2.82e-03 +2024-08-06 13:37:14,388 INFO [trainer.py:765] (7/8) Epoch 30, batch 2500, train_loss[loss=3.061, NarTop10Accuracy=0.7045, over 5002.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6455, over 5534.66 frames. ], batch size: 6, lr: 2.82e-03 +2024-08-06 13:37:36,037 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:38:28,438 INFO [trainer.py:765] (7/8) Epoch 31, batch 100, train_loss[loss=3.249, NarTop10Accuracy=0.6702, over 7238.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6609, over 2358.22 frames. ], batch size: 30, lr: 2.77e-03 +2024-08-06 13:39:02,651 INFO [trainer.py:765] (7/8) Epoch 31, batch 200, train_loss[loss=2.992, NarTop10Accuracy=0.7091, over 6857.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6572, over 3855.48 frames. ], batch size: 17, lr: 2.76e-03 +2024-08-06 13:39:34,676 INFO [trainer.py:765] (7/8) Epoch 31, batch 300, train_loss[loss=3.325, NarTop10Accuracy=0.6592, over 7209.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6568, over 4671.28 frames. ], batch size: 22, lr: 2.76e-03 +2024-08-06 13:40:07,363 INFO [trainer.py:765] (7/8) Epoch 31, batch 400, train_loss[loss=3.571, NarTop10Accuracy=0.6137, over 5280.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6513, over 5119.44 frames. ], batch size: 7, lr: 2.76e-03 +2024-08-06 13:40:37,813 INFO [trainer.py:765] (7/8) Epoch 31, batch 500, train_loss[loss=3.17, NarTop10Accuracy=0.6871, over 6050.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6518, over 5398.90 frames. ], batch size: 11, lr: 2.76e-03 +2024-08-06 13:40:58,298 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:41:08,777 INFO [trainer.py:811] (7/8) Epoch 31, validation: loss=3.268, NarTop10Accuracy=0.6673, over 1907754.00 frames. +2024-08-06 13:41:08,778 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 13:41:09,338 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 1.987e+02 2.143e+02 2.328e+02 4.341e+02, threshold=4.287e+02, percent-clipped=0.1 +2024-08-06 13:41:20,862 INFO [trainer.py:765] (7/8) Epoch 31, batch 600, train_loss[loss=3.212, NarTop10Accuracy=0.6846, over 5745.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6514, over 5670.25 frames. ], batch size: 9, lr: 2.76e-03 +2024-08-06 13:41:54,259 INFO [trainer.py:765] (7/8) Epoch 31, batch 700, train_loss[loss=3.462, NarTop10Accuracy=0.628, over 4992.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6495, over 5757.93 frames. ], batch size: 6, lr: 2.76e-03 +2024-08-06 13:42:32,157 INFO [trainer.py:765] (7/8) Epoch 31, batch 800, train_loss[loss=3.217, NarTop10Accuracy=0.6827, over 4940.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6481, over 5811.42 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:43:06,274 INFO [trainer.py:765] (7/8) Epoch 31, batch 900, train_loss[loss=3.311, NarTop10Accuracy=0.6558, over 6202.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6507, over 5815.32 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:43:38,009 INFO [trainer.py:765] (7/8) Epoch 31, batch 1000, train_loss[loss=3.449, NarTop10Accuracy=0.6155, over 6179.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6487, over 5910.12 frames. ], batch size: 13, lr: 2.75e-03 +2024-08-06 13:44:14,512 INFO [trainer.py:765] (7/8) Epoch 31, batch 1100, train_loss[loss=3.511, NarTop10Accuracy=0.6172, over 6819.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6483, over 5958.31 frames. 
], batch size: 17, lr: 2.75e-03 +2024-08-06 13:44:53,785 INFO [trainer.py:765] (7/8) Epoch 31, batch 1200, train_loss[loss=3.402, NarTop10Accuracy=0.6373, over 7652.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6477, over 5946.81 frames. ], batch size: 32, lr: 2.75e-03 +2024-08-06 13:45:25,076 INFO [trainer.py:765] (7/8) Epoch 31, batch 1300, train_loss[loss=3.335, NarTop10Accuracy=0.6578, over 4959.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6463, over 6011.41 frames. ], batch size: 6, lr: 2.75e-03 +2024-08-06 13:45:58,740 INFO [trainer.py:765] (7/8) Epoch 31, batch 1400, train_loss[loss=3.376, NarTop10Accuracy=0.6496, over 6091.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6443, over 6037.89 frames. ], batch size: 11, lr: 2.74e-03 +2024-08-06 13:46:33,490 INFO [trainer.py:765] (7/8) Epoch 31, batch 1500, train_loss[loss=3.577, NarTop10Accuracy=0.6028, over 6582.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6452, over 5964.48 frames. ], batch size: 48, lr: 2.74e-03 +2024-08-06 13:47:04,657 INFO [trainer.py:765] (7/8) Epoch 31, batch 1600, train_loss[loss=3.241, NarTop10Accuracy=0.6697, over 7327.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 5962.83 frames. ], batch size: 23, lr: 2.74e-03 +2024-08-06 13:47:31,423 INFO [trainer.py:765] (7/8) Epoch 31, batch 1700, train_loss[loss=3.52, NarTop10Accuracy=0.6072, over 6284.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.6442, over 5940.09 frames. ], batch size: 13, lr: 2.74e-03 +2024-08-06 13:47:58,016 INFO [trainer.py:765] (7/8) Epoch 31, batch 1800, train_loss[loss=3.445, NarTop10Accuracy=0.6238, over 6962.00 frames. ], tot_loss[loss=3.365, NarTop10Accuracy=0.6449, over 5995.75 frames. ], batch size: 22, lr: 2.74e-03 +2024-08-06 13:48:24,576 INFO [trainer.py:765] (7/8) Epoch 31, batch 1900, train_loss[loss=3.497, NarTop10Accuracy=0.6173, over 6145.00 frames. ], tot_loss[loss=3.379, NarTop10Accuracy=0.6427, over 6046.53 frames. ], batch size: 49, lr: 2.74e-03 +2024-08-06 13:48:50,257 INFO [trainer.py:765] (7/8) Epoch 31, batch 2000, train_loss[loss=3.582, NarTop10Accuracy=0.6065, over 6310.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6432, over 6026.60 frames. ], batch size: 50, lr: 2.73e-03 +2024-08-06 13:49:15,764 INFO [trainer.py:765] (7/8) Epoch 31, batch 2100, train_loss[loss=3.105, NarTop10Accuracy=0.6954, over 3927.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.645, over 6006.55 frames. ], batch size: 4, lr: 2.73e-03 +2024-08-06 13:49:41,278 INFO [trainer.py:765] (7/8) Epoch 31, batch 2200, train_loss[loss=3.533, NarTop10Accuracy=0.6181, over 7480.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6458, over 6037.29 frames. ], batch size: 30, lr: 2.73e-03 +2024-08-06 13:50:06,707 INFO [trainer.py:765] (7/8) Epoch 31, batch 2300, train_loss[loss=3.35, NarTop10Accuracy=0.6537, over 5765.00 frames. ], tot_loss[loss=3.385, NarTop10Accuracy=0.641, over 6068.54 frames. ], batch size: 9, lr: 2.73e-03 +2024-08-06 13:50:31,392 INFO [trainer.py:765] (7/8) Epoch 31, batch 2400, train_loss[loss=3.499, NarTop10Accuracy=0.6226, over 5155.00 frames. ], tot_loss[loss=3.4, NarTop10Accuracy=0.6376, over 5881.04 frames. ], batch size: 7, lr: 2.73e-03 +2024-08-06 13:50:54,891 INFO [trainer.py:765] (7/8) Epoch 31, batch 2500, train_loss[loss=3.391, NarTop10Accuracy=0.6354, over 5077.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6442, over 5536.87 frames. 
], batch size: 6, lr: 2.72e-03 +2024-08-06 13:51:08,994 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 13:51:19,069 INFO [trainer.py:811] (7/8) Epoch 31, validation: loss=3.234, NarTop10Accuracy=0.6746, over 1907754.00 frames. +2024-08-06 13:51:19,070 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 13:51:19,540 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.007e+02 2.182e+02 2.368e+02 4.565e+02, threshold=4.363e+02, percent-clipped=0.1 +2024-08-06 13:51:26,108 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 13:52:19,910 INFO [trainer.py:765] (7/8) Epoch 32, batch 100, train_loss[loss=3.201, NarTop10Accuracy=0.6704, over 7278.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6559, over 2380.95 frames. ], batch size: 31, lr: 2.68e-03 +2024-08-06 13:52:52,538 INFO [trainer.py:765] (7/8) Epoch 32, batch 200, train_loss[loss=3.434, NarTop10Accuracy=0.6302, over 7003.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.6542, over 3882.12 frames. ], batch size: 17, lr: 2.68e-03 +2024-08-06 13:53:28,093 INFO [trainer.py:765] (7/8) Epoch 32, batch 300, train_loss[loss=3.235, NarTop10Accuracy=0.6694, over 6989.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6529, over 4675.71 frames. ], batch size: 22, lr: 2.68e-03 +2024-08-06 13:54:00,886 INFO [trainer.py:765] (7/8) Epoch 32, batch 400, train_loss[loss=3.425, NarTop10Accuracy=0.6233, over 5098.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6522, over 5119.76 frames. ], batch size: 7, lr: 2.67e-03 +2024-08-06 13:54:32,821 INFO [trainer.py:765] (7/8) Epoch 32, batch 500, train_loss[loss=3.052, NarTop10Accuracy=0.7175, over 6165.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6526, over 5390.92 frames. ], batch size: 11, lr: 2.67e-03 +2024-08-06 13:55:01,772 INFO [trainer.py:765] (7/8) Epoch 32, batch 600, train_loss[loss=3.363, NarTop10Accuracy=0.6523, over 5847.00 frames. ], tot_loss[loss=3.319, NarTop10Accuracy=0.6549, over 5665.43 frames. ], batch size: 9, lr: 2.67e-03 +2024-08-06 13:55:41,511 INFO [trainer.py:765] (7/8) Epoch 32, batch 700, train_loss[loss=3.153, NarTop10Accuracy=0.6855, over 4962.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6523, over 5741.73 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:13,172 INFO [trainer.py:765] (7/8) Epoch 32, batch 800, train_loss[loss=2.877, NarTop10Accuracy=0.7382, over 5002.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6504, over 5777.47 frames. ], batch size: 6, lr: 2.67e-03 +2024-08-06 13:56:43,166 INFO [trainer.py:765] (7/8) Epoch 32, batch 900, train_loss[loss=3.662, NarTop10Accuracy=0.5903, over 6324.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6509, over 5801.37 frames. ], batch size: 13, lr: 2.67e-03 +2024-08-06 13:57:24,520 INFO [trainer.py:765] (7/8) Epoch 32, batch 1000, train_loss[loss=3.631, NarTop10Accuracy=0.5853, over 6240.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6501, over 5903.00 frames. ], batch size: 13, lr: 2.66e-03 +2024-08-06 13:57:57,452 INFO [trainer.py:765] (7/8) Epoch 32, batch 1100, train_loss[loss=3.322, NarTop10Accuracy=0.6656, over 6385.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6455, over 5929.20 frames. ], batch size: 16, lr: 2.66e-03 +2024-08-06 13:58:30,541 INFO [trainer.py:765] (7/8) Epoch 32, batch 1200, train_loss[loss=3.229, NarTop10Accuracy=0.6699, over 7484.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6451, over 5929.15 frames. 
], batch size: 30, lr: 2.66e-03 +2024-08-06 13:59:08,259 INFO [trainer.py:765] (7/8) Epoch 32, batch 1300, train_loss[loss=3.31, NarTop10Accuracy=0.6563, over 5103.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6431, over 6015.72 frames. ], batch size: 6, lr: 2.66e-03 +2024-08-06 13:59:42,265 INFO [trainer.py:765] (7/8) Epoch 32, batch 1400, train_loss[loss=3.387, NarTop10Accuracy=0.6318, over 6403.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 6026.86 frames. ], batch size: 11, lr: 2.66e-03 +2024-08-06 14:00:12,975 INFO [trainer.py:765] (7/8) Epoch 32, batch 1500, train_loss[loss=3.737, NarTop10Accuracy=0.5683, over 5945.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6441, over 5964.70 frames. ], batch size: 49, lr: 2.66e-03 +2024-08-06 14:00:40,823 INFO [trainer.py:765] (7/8) Epoch 32, batch 1600, train_loss[loss=3.262, NarTop10Accuracy=0.6684, over 7055.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6443, over 5940.71 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:01:07,533 INFO [trainer.py:765] (7/8) Epoch 32, batch 1700, train_loss[loss=3.184, NarTop10Accuracy=0.6916, over 6394.00 frames. ], tot_loss[loss=3.371, NarTop10Accuracy=0.644, over 5930.13 frames. ], batch size: 13, lr: 2.65e-03 +2024-08-06 14:01:34,089 INFO [trainer.py:765] (7/8) Epoch 32, batch 1800, train_loss[loss=3.183, NarTop10Accuracy=0.6945, over 7041.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 6006.81 frames. ], batch size: 22, lr: 2.65e-03 +2024-08-06 14:02:00,636 INFO [trainer.py:765] (7/8) Epoch 32, batch 1900, train_loss[loss=3.464, NarTop10Accuracy=0.6192, over 6478.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 6045.08 frames. ], batch size: 50, lr: 2.65e-03 +2024-08-06 14:02:20,590 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:02:30,653 INFO [trainer.py:811] (7/8) Epoch 32, validation: loss=3.204, NarTop10Accuracy=0.6812, over 1907754.00 frames. +2024-08-06 14:02:30,653 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:02:31,152 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.032e+02 2.200e+02 2.392e+02 6.182e+02, threshold=4.401e+02, percent-clipped=0.1 +2024-08-06 14:02:36,383 INFO [trainer.py:765] (7/8) Epoch 32, batch 2000, train_loss[loss=3.524, NarTop10Accuracy=0.6111, over 6091.00 frames. ], tot_loss[loss=3.37, NarTop10Accuracy=0.644, over 6020.40 frames. ], batch size: 49, lr: 2.65e-03 +2024-08-06 14:03:01,698 INFO [trainer.py:765] (7/8) Epoch 32, batch 2100, train_loss[loss=3.305, NarTop10Accuracy=0.6537, over 4737.00 frames. ], tot_loss[loss=3.377, NarTop10Accuracy=0.6425, over 6003.12 frames. ], batch size: 5, lr: 2.65e-03 +2024-08-06 14:03:27,176 INFO [trainer.py:765] (7/8) Epoch 32, batch 2200, train_loss[loss=3.697, NarTop10Accuracy=0.5834, over 7084.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6459, over 6034.06 frames. ], batch size: 30, lr: 2.64e-03 +2024-08-06 14:03:52,585 INFO [trainer.py:765] (7/8) Epoch 32, batch 2300, train_loss[loss=3.705, NarTop10Accuracy=0.5745, over 5807.00 frames. ], tot_loss[loss=3.375, NarTop10Accuracy=0.6432, over 6057.04 frames. ], batch size: 9, lr: 2.64e-03 +2024-08-06 14:04:17,274 INFO [trainer.py:765] (7/8) Epoch 32, batch 2400, train_loss[loss=3.402, NarTop10Accuracy=0.6456, over 5184.00 frames. ], tot_loss[loss=3.383, NarTop10Accuracy=0.6415, over 5866.35 frames. 
], batch size: 7, lr: 2.64e-03 +2024-08-06 14:04:40,635 INFO [trainer.py:765] (7/8) Epoch 32, batch 2500, train_loss[loss=3.191, NarTop10Accuracy=0.6867, over 4221.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.6467, over 5516.22 frames. ], batch size: 5, lr: 2.64e-03 +2024-08-06 14:05:02,267 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 14:06:02,906 INFO [trainer.py:765] (7/8) Epoch 33, batch 100, train_loss[loss=3.617, NarTop10Accuracy=0.5951, over 7232.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6547, over 2373.35 frames. ], batch size: 31, lr: 2.60e-03 +2024-08-06 14:06:36,079 INFO [trainer.py:765] (7/8) Epoch 33, batch 200, train_loss[loss=3.335, NarTop10Accuracy=0.6477, over 6932.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6562, over 3868.85 frames. ], batch size: 17, lr: 2.59e-03 +2024-08-06 14:07:12,147 INFO [trainer.py:765] (7/8) Epoch 33, batch 300, train_loss[loss=3.196, NarTop10Accuracy=0.6846, over 6897.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6566, over 4689.81 frames. ], batch size: 21, lr: 2.59e-03 +2024-08-06 14:07:48,255 INFO [trainer.py:765] (7/8) Epoch 33, batch 400, train_loss[loss=3.384, NarTop10Accuracy=0.6458, over 5189.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.656, over 5129.12 frames. ], batch size: 7, lr: 2.59e-03 +2024-08-06 14:08:18,547 INFO [trainer.py:765] (7/8) Epoch 33, batch 500, train_loss[loss=3.154, NarTop10Accuracy=0.6963, over 6138.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6547, over 5404.15 frames. ], batch size: 11, lr: 2.59e-03 +2024-08-06 14:08:49,792 INFO [trainer.py:765] (7/8) Epoch 33, batch 600, train_loss[loss=3.258, NarTop10Accuracy=0.6723, over 5771.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6533, over 5660.93 frames. ], batch size: 9, lr: 2.59e-03 +2024-08-06 14:09:32,926 INFO [trainer.py:765] (7/8) Epoch 33, batch 700, train_loss[loss=3.303, NarTop10Accuracy=0.666, over 4973.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6527, over 5732.36 frames. ], batch size: 6, lr: 2.59e-03 +2024-08-06 14:10:04,596 INFO [trainer.py:765] (7/8) Epoch 33, batch 800, train_loss[loss=3, NarTop10Accuracy=0.725, over 5001.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6503, over 5775.64 frames. ], batch size: 6, lr: 2.58e-03 +2024-08-06 14:10:35,386 INFO [trainer.py:765] (7/8) Epoch 33, batch 900, train_loss[loss=3.372, NarTop10Accuracy=0.6473, over 6617.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6512, over 5793.74 frames. ], batch size: 14, lr: 2.58e-03 +2024-08-06 14:11:15,069 INFO [trainer.py:765] (7/8) Epoch 33, batch 1000, train_loss[loss=3.329, NarTop10Accuracy=0.6514, over 6396.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6498, over 5904.32 frames. ], batch size: 13, lr: 2.58e-03 +2024-08-06 14:11:47,302 INFO [trainer.py:765] (7/8) Epoch 33, batch 1100, train_loss[loss=3.524, NarTop10Accuracy=0.6135, over 6833.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6492, over 5944.33 frames. ], batch size: 17, lr: 2.58e-03 +2024-08-06 14:12:20,928 INFO [trainer.py:765] (7/8) Epoch 33, batch 1200, train_loss[loss=3.478, NarTop10Accuracy=0.6191, over 7100.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6474, over 5938.80 frames. ], batch size: 30, lr: 2.58e-03 +2024-08-06 14:12:57,629 INFO [trainer.py:765] (7/8) Epoch 33, batch 1300, train_loss[loss=3.263, NarTop10Accuracy=0.6633, over 5018.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6481, over 6007.92 frames. 
], batch size: 6, lr: 2.58e-03 +2024-08-06 14:13:30,666 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:13:41,686 INFO [trainer.py:811] (7/8) Epoch 33, validation: loss=3.242, NarTop10Accuracy=0.6732, over 1907754.00 frames. +2024-08-06 14:13:41,687 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:13:42,264 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.031e+02 2.174e+02 2.363e+02 4.871e+02, threshold=4.347e+02, percent-clipped=0.1 +2024-08-06 14:13:42,803 INFO [trainer.py:765] (7/8) Epoch 33, batch 1400, train_loss[loss=3.156, NarTop10Accuracy=0.6894, over 6147.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6492, over 6041.31 frames. ], batch size: 11, lr: 2.58e-03 +2024-08-06 14:14:11,245 INFO [trainer.py:765] (7/8) Epoch 33, batch 1500, train_loss[loss=3.435, NarTop10Accuracy=0.6207, over 5785.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.647, over 5972.57 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:14:39,191 INFO [trainer.py:765] (7/8) Epoch 33, batch 1600, train_loss[loss=3.297, NarTop10Accuracy=0.6601, over 7120.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6449, over 5957.37 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:05,857 INFO [trainer.py:765] (7/8) Epoch 33, batch 1700, train_loss[loss=3.563, NarTop10Accuracy=0.6085, over 6182.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6457, over 5926.60 frames. ], batch size: 13, lr: 2.57e-03 +2024-08-06 14:15:32,589 INFO [trainer.py:765] (7/8) Epoch 33, batch 1800, train_loss[loss=3.349, NarTop10Accuracy=0.6475, over 7117.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6483, over 5990.04 frames. ], batch size: 22, lr: 2.57e-03 +2024-08-06 14:15:59,214 INFO [trainer.py:765] (7/8) Epoch 33, batch 1900, train_loss[loss=3.447, NarTop10Accuracy=0.6311, over 5881.00 frames. ], tot_loss[loss=3.366, NarTop10Accuracy=0.6452, over 6040.48 frames. ], batch size: 49, lr: 2.57e-03 +2024-08-06 14:16:24,894 INFO [trainer.py:765] (7/8) Epoch 33, batch 2000, train_loss[loss=3.549, NarTop10Accuracy=0.6122, over 6131.00 frames. ], tot_loss[loss=3.354, NarTop10Accuracy=0.647, over 6000.51 frames. ], batch size: 50, lr: 2.57e-03 +2024-08-06 14:16:50,349 INFO [trainer.py:765] (7/8) Epoch 33, batch 2100, train_loss[loss=2.892, NarTop10Accuracy=0.7157, over 4846.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6464, over 5994.49 frames. ], batch size: 5, lr: 2.56e-03 +2024-08-06 14:17:15,825 INFO [trainer.py:765] (7/8) Epoch 33, batch 2200, train_loss[loss=3.479, NarTop10Accuracy=0.6319, over 7176.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6472, over 6034.75 frames. ], batch size: 30, lr: 2.56e-03 +2024-08-06 14:17:41,308 INFO [trainer.py:765] (7/8) Epoch 33, batch 2300, train_loss[loss=3.101, NarTop10Accuracy=0.6817, over 5833.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6441, over 6050.90 frames. ], batch size: 9, lr: 2.56e-03 +2024-08-06 14:18:10,143 INFO [trainer.py:765] (7/8) Epoch 33, batch 2400, train_loss[loss=3.498, NarTop10Accuracy=0.6282, over 5098.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6423, over 5866.13 frames. ], batch size: 7, lr: 2.56e-03 +2024-08-06 14:18:33,706 INFO [trainer.py:765] (7/8) Epoch 33, batch 2500, train_loss[loss=3.296, NarTop10Accuracy=0.6553, over 5116.00 frames. ], tot_loss[loss=3.346, NarTop10Accuracy=0.6484, over 5537.94 frames. ], batch size: 6, lr: 2.56e-03 +2024-08-06 14:18:54,626 INFO [trainer.py:650] (7/8) Reaches end of dataloader. 
+2024-08-06 14:19:51,932 INFO [trainer.py:765] (7/8) Epoch 34, batch 100, train_loss[loss=3.174, NarTop10Accuracy=0.6843, over 7287.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6589, over 2381.06 frames. ], batch size: 31, lr: 2.52e-03 +2024-08-06 14:20:24,372 INFO [trainer.py:765] (7/8) Epoch 34, batch 200, train_loss[loss=3.212, NarTop10Accuracy=0.6851, over 6980.00 frames. ], tot_loss[loss=3.292, NarTop10Accuracy=0.6609, over 3884.35 frames. ], batch size: 17, lr: 2.52e-03 +2024-08-06 14:21:00,841 INFO [trainer.py:765] (7/8) Epoch 34, batch 300, train_loss[loss=3.267, NarTop10Accuracy=0.6605, over 6805.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6567, over 4669.97 frames. ], batch size: 21, lr: 2.51e-03 +2024-08-06 14:21:31,448 INFO [trainer.py:765] (7/8) Epoch 34, batch 400, train_loss[loss=3.134, NarTop10Accuracy=0.7014, over 4994.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6556, over 5115.47 frames. ], batch size: 7, lr: 2.51e-03 +2024-08-06 14:22:01,875 INFO [trainer.py:765] (7/8) Epoch 34, batch 500, train_loss[loss=3.252, NarTop10Accuracy=0.6627, over 6218.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6553, over 5410.98 frames. ], batch size: 11, lr: 2.51e-03 +2024-08-06 14:22:36,825 INFO [trainer.py:765] (7/8) Epoch 34, batch 600, train_loss[loss=3.347, NarTop10Accuracy=0.6506, over 5831.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6541, over 5674.66 frames. ], batch size: 9, lr: 2.51e-03 +2024-08-06 14:23:14,605 INFO [trainer.py:765] (7/8) Epoch 34, batch 700, train_loss[loss=3.17, NarTop10Accuracy=0.6768, over 4964.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6531, over 5741.29 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:46,606 INFO [trainer.py:765] (7/8) Epoch 34, batch 800, train_loss[loss=3.36, NarTop10Accuracy=0.641, over 5088.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6524, over 5798.66 frames. ], batch size: 6, lr: 2.51e-03 +2024-08-06 14:23:50,719 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:24:00,855 INFO [trainer.py:811] (7/8) Epoch 34, validation: loss=3.226, NarTop10Accuracy=0.6758, over 1907754.00 frames. +2024-08-06 14:24:00,856 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:24:01,413 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.033e+02 2.200e+02 2.391e+02 5.918e+02, threshold=4.399e+02, percent-clipped=0.1 +2024-08-06 14:24:28,898 INFO [trainer.py:765] (7/8) Epoch 34, batch 900, train_loss[loss=3.304, NarTop10Accuracy=0.6731, over 6235.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.652, over 5811.56 frames. ], batch size: 13, lr: 2.51e-03 +2024-08-06 14:25:05,287 INFO [trainer.py:765] (7/8) Epoch 34, batch 1000, train_loss[loss=3.327, NarTop10Accuracy=0.6529, over 6261.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6504, over 5919.91 frames. ], batch size: 13, lr: 2.50e-03 +2024-08-06 14:25:37,996 INFO [trainer.py:765] (7/8) Epoch 34, batch 1100, train_loss[loss=3.543, NarTop10Accuracy=0.6049, over 6940.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6496, over 5959.28 frames. ], batch size: 17, lr: 2.50e-03 +2024-08-06 14:26:13,974 INFO [trainer.py:765] (7/8) Epoch 34, batch 1200, train_loss[loss=3.438, NarTop10Accuracy=0.6425, over 7085.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6498, over 5933.23 frames. 
], batch size: 30, lr: 2.50e-03 +2024-08-06 14:26:52,652 INFO [trainer.py:765] (7/8) Epoch 34, batch 1300, train_loss[loss=3.659, NarTop10Accuracy=0.5828, over 4895.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6491, over 6011.38 frames. ], batch size: 6, lr: 2.50e-03 +2024-08-06 14:27:24,383 INFO [trainer.py:765] (7/8) Epoch 34, batch 1400, train_loss[loss=3.197, NarTop10Accuracy=0.6861, over 6103.00 frames. ], tot_loss[loss=3.349, NarTop10Accuracy=0.6483, over 6035.75 frames. ], batch size: 11, lr: 2.50e-03 +2024-08-06 14:27:52,726 INFO [trainer.py:765] (7/8) Epoch 34, batch 1500, train_loss[loss=3.668, NarTop10Accuracy=0.5924, over 6137.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.651, over 5972.73 frames. ], batch size: 49, lr: 2.50e-03 +2024-08-06 14:28:20,672 INFO [trainer.py:765] (7/8) Epoch 34, batch 1600, train_loss[loss=3.164, NarTop10Accuracy=0.678, over 7020.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6472, over 5950.10 frames. ], batch size: 22, lr: 2.50e-03 +2024-08-06 14:28:47,383 INFO [trainer.py:765] (7/8) Epoch 34, batch 1700, train_loss[loss=3.405, NarTop10Accuracy=0.634, over 6233.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6441, over 5920.40 frames. ], batch size: 13, lr: 2.49e-03 +2024-08-06 14:29:14,009 INFO [trainer.py:765] (7/8) Epoch 34, batch 1800, train_loss[loss=3.661, NarTop10Accuracy=0.5807, over 7090.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6474, over 5981.75 frames. ], batch size: 22, lr: 2.49e-03 +2024-08-06 14:29:43,751 INFO [trainer.py:765] (7/8) Epoch 34, batch 1900, train_loss[loss=3.578, NarTop10Accuracy=0.6057, over 5869.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.643, over 6026.17 frames. ], batch size: 48, lr: 2.49e-03 +2024-08-06 14:30:09,515 INFO [trainer.py:765] (7/8) Epoch 34, batch 2000, train_loss[loss=3.554, NarTop10Accuracy=0.6097, over 5920.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6433, over 5998.49 frames. ], batch size: 49, lr: 2.49e-03 +2024-08-06 14:30:35,015 INFO [trainer.py:765] (7/8) Epoch 34, batch 2100, train_loss[loss=3.156, NarTop10Accuracy=0.7015, over 3845.00 frames. ], tot_loss[loss=3.363, NarTop10Accuracy=0.6452, over 5983.44 frames. ], batch size: 4, lr: 2.49e-03 +2024-08-06 14:31:00,510 INFO [trainer.py:765] (7/8) Epoch 34, batch 2200, train_loss[loss=3.577, NarTop10Accuracy=0.609, over 7240.00 frames. ], tot_loss[loss=3.364, NarTop10Accuracy=0.6447, over 6030.91 frames. ], batch size: 30, lr: 2.49e-03 +2024-08-06 14:31:25,978 INFO [trainer.py:765] (7/8) Epoch 34, batch 2300, train_loss[loss=3.085, NarTop10Accuracy=0.7102, over 5731.00 frames. ], tot_loss[loss=3.369, NarTop10Accuracy=0.6437, over 6053.74 frames. ], batch size: 9, lr: 2.49e-03 +2024-08-06 14:31:50,751 INFO [trainer.py:765] (7/8) Epoch 34, batch 2400, train_loss[loss=3.251, NarTop10Accuracy=0.6604, over 5089.00 frames. ], tot_loss[loss=3.368, NarTop10Accuracy=0.6442, over 5866.92 frames. ], batch size: 7, lr: 2.48e-03 +2024-08-06 14:32:14,248 INFO [trainer.py:765] (7/8) Epoch 34, batch 2500, train_loss[loss=3.006, NarTop10Accuracy=0.7027, over 4976.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.649, over 5533.31 frames. ], batch size: 6, lr: 2.48e-03 +2024-08-06 14:32:35,395 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 14:33:26,337 INFO [trainer.py:765] (7/8) Epoch 35, batch 100, train_loss[loss=3.285, NarTop10Accuracy=0.6589, over 7304.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6595, over 2362.52 frames. 
], batch size: 31, lr: 2.44e-03 +2024-08-06 14:34:03,581 INFO [trainer.py:765] (7/8) Epoch 35, batch 200, train_loss[loss=3.428, NarTop10Accuracy=0.6401, over 6964.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.658, over 3860.80 frames. ], batch size: 17, lr: 2.44e-03 +2024-08-06 14:34:13,186 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:34:23,574 INFO [trainer.py:811] (7/8) Epoch 35, validation: loss=3.163, NarTop10Accuracy=0.689, over 1907754.00 frames. +2024-08-06 14:34:23,575 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:34:24,109 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.042e+02 2.203e+02 2.360e+02 4.181e+02, threshold=4.406e+02, percent-clipped=0.0 +2024-08-06 14:34:44,663 INFO [trainer.py:765] (7/8) Epoch 35, batch 300, train_loss[loss=3.536, NarTop10Accuracy=0.6066, over 7050.00 frames. ], tot_loss[loss=3.298, NarTop10Accuracy=0.6592, over 4659.40 frames. ], batch size: 22, lr: 2.44e-03 +2024-08-06 14:35:13,541 INFO [trainer.py:765] (7/8) Epoch 35, batch 400, train_loss[loss=3.125, NarTop10Accuracy=0.695, over 5054.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6571, over 5115.31 frames. ], batch size: 7, lr: 2.44e-03 +2024-08-06 14:35:48,186 INFO [trainer.py:765] (7/8) Epoch 35, batch 500, train_loss[loss=3.489, NarTop10Accuracy=0.6112, over 6121.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6567, over 5424.05 frames. ], batch size: 11, lr: 2.44e-03 +2024-08-06 14:36:22,746 INFO [trainer.py:765] (7/8) Epoch 35, batch 600, train_loss[loss=3.159, NarTop10Accuracy=0.6971, over 5849.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6545, over 5680.95 frames. ], batch size: 9, lr: 2.44e-03 +2024-08-06 14:36:57,825 INFO [trainer.py:765] (7/8) Epoch 35, batch 700, train_loss[loss=3.271, NarTop10Accuracy=0.6564, over 5244.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6553, over 5745.44 frames. ], batch size: 6, lr: 2.44e-03 +2024-08-06 14:37:29,768 INFO [trainer.py:765] (7/8) Epoch 35, batch 800, train_loss[loss=3.209, NarTop10Accuracy=0.679, over 5204.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6552, over 5801.45 frames. ], batch size: 6, lr: 2.43e-03 +2024-08-06 14:38:03,302 INFO [trainer.py:765] (7/8) Epoch 35, batch 900, train_loss[loss=3.133, NarTop10Accuracy=0.6988, over 6239.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6516, over 5826.28 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:38:43,708 INFO [trainer.py:765] (7/8) Epoch 35, batch 1000, train_loss[loss=3.502, NarTop10Accuracy=0.6203, over 6234.00 frames. ], tot_loss[loss=3.333, NarTop10Accuracy=0.6522, over 5933.66 frames. ], batch size: 13, lr: 2.43e-03 +2024-08-06 14:39:16,566 INFO [trainer.py:765] (7/8) Epoch 35, batch 1100, train_loss[loss=3.657, NarTop10Accuracy=0.5811, over 6862.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6524, over 5952.45 frames. ], batch size: 17, lr: 2.43e-03 +2024-08-06 14:39:50,837 INFO [trainer.py:765] (7/8) Epoch 35, batch 1200, train_loss[loss=3.354, NarTop10Accuracy=0.6555, over 7346.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6508, over 5942.25 frames. ], batch size: 30, lr: 2.43e-03 +2024-08-06 14:40:33,952 INFO [trainer.py:765] (7/8) Epoch 35, batch 1300, train_loss[loss=3.38, NarTop10Accuracy=0.6448, over 4948.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6507, over 6008.14 frames. 
], batch size: 6, lr: 2.43e-03 +2024-08-06 14:41:03,182 INFO [trainer.py:765] (7/8) Epoch 35, batch 1400, train_loss[loss=3.335, NarTop10Accuracy=0.6507, over 6113.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.6478, over 6023.49 frames. ], batch size: 11, lr: 2.43e-03 +2024-08-06 14:41:33,823 INFO [trainer.py:765] (7/8) Epoch 35, batch 1500, train_loss[loss=3.503, NarTop10Accuracy=0.619, over 6313.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.6474, over 5966.65 frames. ], batch size: 49, lr: 2.43e-03 +2024-08-06 14:42:01,776 INFO [trainer.py:765] (7/8) Epoch 35, batch 1600, train_loss[loss=3.574, NarTop10Accuracy=0.597, over 7229.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.648, over 5952.52 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:42:28,465 INFO [trainer.py:765] (7/8) Epoch 35, batch 1700, train_loss[loss=3.223, NarTop10Accuracy=0.674, over 6257.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6487, over 5943.29 frames. ], batch size: 13, lr: 2.42e-03 +2024-08-06 14:42:55,039 INFO [trainer.py:765] (7/8) Epoch 35, batch 1800, train_loss[loss=3.111, NarTop10Accuracy=0.6963, over 7123.00 frames. ], tot_loss[loss=3.357, NarTop10Accuracy=0.6469, over 6014.39 frames. ], batch size: 22, lr: 2.42e-03 +2024-08-06 14:43:21,645 INFO [trainer.py:765] (7/8) Epoch 35, batch 1900, train_loss[loss=3.427, NarTop10Accuracy=0.6363, over 6402.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6463, over 6052.78 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:43:47,366 INFO [trainer.py:765] (7/8) Epoch 35, batch 2000, train_loss[loss=3.653, NarTop10Accuracy=0.5856, over 6482.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6456, over 6028.30 frames. ], batch size: 49, lr: 2.42e-03 +2024-08-06 14:44:12,855 INFO [trainer.py:765] (7/8) Epoch 35, batch 2100, train_loss[loss=3.133, NarTop10Accuracy=0.6983, over 3959.00 frames. ], tot_loss[loss=3.352, NarTop10Accuracy=0.647, over 6014.65 frames. ], batch size: 4, lr: 2.42e-03 +2024-08-06 14:44:38,387 INFO [trainer.py:765] (7/8) Epoch 35, batch 2200, train_loss[loss=3.454, NarTop10Accuracy=0.6235, over 7172.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6453, over 6041.09 frames. ], batch size: 30, lr: 2.42e-03 +2024-08-06 14:44:47,198 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:44:57,441 INFO [trainer.py:811] (7/8) Epoch 35, validation: loss=3.219, NarTop10Accuracy=0.6773, over 1907754.00 frames. +2024-08-06 14:44:57,441 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:44:57,973 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.083e+02 2.237e+02 2.412e+02 3.944e+02, threshold=4.474e+02, percent-clipped=0.0 +2024-08-06 14:45:14,099 INFO [trainer.py:765] (7/8) Epoch 35, batch 2300, train_loss[loss=3.066, NarTop10Accuracy=0.682, over 5823.00 frames. ], tot_loss[loss=3.372, NarTop10Accuracy=0.6433, over 6084.29 frames. ], batch size: 9, lr: 2.41e-03 +2024-08-06 14:45:38,819 INFO [trainer.py:765] (7/8) Epoch 35, batch 2400, train_loss[loss=3.205, NarTop10Accuracy=0.6738, over 5207.00 frames. ], tot_loss[loss=3.373, NarTop10Accuracy=0.6435, over 5883.60 frames. ], batch size: 7, lr: 2.41e-03 +2024-08-06 14:46:02,146 INFO [trainer.py:765] (7/8) Epoch 35, batch 2500, train_loss[loss=3.355, NarTop10Accuracy=0.6629, over 5008.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6507, over 5529.67 frames. ], batch size: 6, lr: 2.41e-03 +2024-08-06 14:46:23,156 INFO [trainer.py:650] (7/8) Reaches end of dataloader. 
+2024-08-06 14:47:25,441 INFO [trainer.py:765] (7/8) Epoch 36, batch 100, train_loss[loss=3.311, NarTop10Accuracy=0.662, over 7414.00 frames. ], tot_loss[loss=3.307, NarTop10Accuracy=0.6572, over 2375.78 frames. ], batch size: 31, lr: 2.38e-03 +2024-08-06 14:47:58,358 INFO [trainer.py:765] (7/8) Epoch 36, batch 200, train_loss[loss=3.143, NarTop10Accuracy=0.6928, over 6947.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6598, over 3869.90 frames. ], batch size: 17, lr: 2.37e-03 +2024-08-06 14:48:30,724 INFO [trainer.py:765] (7/8) Epoch 36, batch 300, train_loss[loss=3.21, NarTop10Accuracy=0.683, over 7198.00 frames. ], tot_loss[loss=3.297, NarTop10Accuracy=0.6594, over 4680.88 frames. ], batch size: 22, lr: 2.37e-03 +2024-08-06 14:49:04,815 INFO [trainer.py:765] (7/8) Epoch 36, batch 400, train_loss[loss=2.972, NarTop10Accuracy=0.7318, over 5092.00 frames. ], tot_loss[loss=3.293, NarTop10Accuracy=0.66, over 5124.93 frames. ], batch size: 7, lr: 2.37e-03 +2024-08-06 14:49:36,588 INFO [trainer.py:765] (7/8) Epoch 36, batch 500, train_loss[loss=3.692, NarTop10Accuracy=0.5696, over 6053.00 frames. ], tot_loss[loss=3.282, NarTop10Accuracy=0.6623, over 5417.36 frames. ], batch size: 11, lr: 2.37e-03 +2024-08-06 14:50:09,655 INFO [trainer.py:765] (7/8) Epoch 36, batch 600, train_loss[loss=3.217, NarTop10Accuracy=0.6821, over 5796.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6584, over 5693.83 frames. ], batch size: 9, lr: 2.37e-03 +2024-08-06 14:50:46,513 INFO [trainer.py:765] (7/8) Epoch 36, batch 700, train_loss[loss=3.253, NarTop10Accuracy=0.6776, over 5041.00 frames. ], tot_loss[loss=3.31, NarTop10Accuracy=0.6567, over 5755.23 frames. ], batch size: 6, lr: 2.37e-03 +2024-08-06 14:51:23,702 INFO [trainer.py:765] (7/8) Epoch 36, batch 800, train_loss[loss=3.683, NarTop10Accuracy=0.5769, over 4231.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.656, over 5792.42 frames. ], batch size: 5, lr: 2.37e-03 +2024-08-06 14:51:54,346 INFO [trainer.py:765] (7/8) Epoch 36, batch 900, train_loss[loss=3.168, NarTop10Accuracy=0.6875, over 6617.00 frames. ], tot_loss[loss=3.312, NarTop10Accuracy=0.6567, over 5808.55 frames. ], batch size: 14, lr: 2.36e-03 +2024-08-06 14:52:30,324 INFO [trainer.py:765] (7/8) Epoch 36, batch 1000, train_loss[loss=3.269, NarTop10Accuracy=0.6773, over 6235.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.6544, over 5908.28 frames. ], batch size: 13, lr: 2.36e-03 +2024-08-06 14:53:06,863 INFO [trainer.py:765] (7/8) Epoch 36, batch 1100, train_loss[loss=3.137, NarTop10Accuracy=0.6956, over 6856.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6509, over 5926.03 frames. ], batch size: 17, lr: 2.36e-03 +2024-08-06 14:53:40,248 INFO [trainer.py:765] (7/8) Epoch 36, batch 1200, train_loss[loss=3.322, NarTop10Accuracy=0.6539, over 7468.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6528, over 5924.90 frames. ], batch size: 31, lr: 2.36e-03 +2024-08-06 14:54:15,855 INFO [trainer.py:765] (7/8) Epoch 36, batch 1300, train_loss[loss=2.941, NarTop10Accuracy=0.7003, over 4924.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6509, over 6007.06 frames. ], batch size: 6, lr: 2.36e-03 +2024-08-06 14:54:51,540 INFO [trainer.py:765] (7/8) Epoch 36, batch 1400, train_loss[loss=3.235, NarTop10Accuracy=0.6595, over 6170.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6491, over 6039.59 frames. 
], batch size: 11, lr: 2.36e-03 +2024-08-06 14:55:21,802 INFO [trainer.py:765] (7/8) Epoch 36, batch 1500, train_loss[loss=3.592, NarTop10Accuracy=0.5989, over 6281.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6499, over 5980.73 frames. ], batch size: 48, lr: 2.36e-03 +2024-08-06 14:55:49,902 INFO [trainer.py:765] (7/8) Epoch 36, batch 1600, train_loss[loss=3.25, NarTop10Accuracy=0.6739, over 7163.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6506, over 5964.79 frames. ], batch size: 22, lr: 2.36e-03 +2024-08-06 14:56:04,131 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 14:56:14,600 INFO [trainer.py:811] (7/8) Epoch 36, validation: loss=3.22, NarTop10Accuracy=0.6784, over 1907754.00 frames. +2024-08-06 14:56:14,601 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 14:56:15,103 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.063e+02 2.224e+02 2.398e+02 5.290e+02, threshold=4.447e+02, percent-clipped=0.1 +2024-08-06 14:56:27,177 INFO [trainer.py:765] (7/8) Epoch 36, batch 1700, train_loss[loss=3.257, NarTop10Accuracy=0.6604, over 6339.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6507, over 5942.53 frames. ], batch size: 13, lr: 2.35e-03 +2024-08-06 14:56:53,759 INFO [trainer.py:765] (7/8) Epoch 36, batch 1800, train_loss[loss=3.392, NarTop10Accuracy=0.6455, over 7211.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6502, over 5995.69 frames. ], batch size: 22, lr: 2.35e-03 +2024-08-06 14:57:20,335 INFO [trainer.py:765] (7/8) Epoch 36, batch 1900, train_loss[loss=3.6, NarTop10Accuracy=0.5931, over 5731.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6485, over 6041.54 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:57:46,056 INFO [trainer.py:765] (7/8) Epoch 36, batch 2000, train_loss[loss=3.696, NarTop10Accuracy=0.569, over 5507.00 frames. ], tot_loss[loss=3.355, NarTop10Accuracy=0.647, over 6021.86 frames. ], batch size: 49, lr: 2.35e-03 +2024-08-06 14:58:11,404 INFO [trainer.py:765] (7/8) Epoch 36, batch 2100, train_loss[loss=3.238, NarTop10Accuracy=0.6675, over 4896.00 frames. ], tot_loss[loss=3.36, NarTop10Accuracy=0.6463, over 5992.04 frames. ], batch size: 5, lr: 2.35e-03 +2024-08-06 14:58:36,832 INFO [trainer.py:765] (7/8) Epoch 36, batch 2200, train_loss[loss=3.619, NarTop10Accuracy=0.5961, over 7148.00 frames. ], tot_loss[loss=3.362, NarTop10Accuracy=0.6462, over 6037.64 frames. ], batch size: 30, lr: 2.35e-03 +2024-08-06 14:59:02,344 INFO [trainer.py:765] (7/8) Epoch 36, batch 2300, train_loss[loss=3.358, NarTop10Accuracy=0.638, over 5782.00 frames. ], tot_loss[loss=3.381, NarTop10Accuracy=0.6423, over 6055.44 frames. ], batch size: 9, lr: 2.35e-03 +2024-08-06 14:59:27,094 INFO [trainer.py:765] (7/8) Epoch 36, batch 2400, train_loss[loss=3.676, NarTop10Accuracy=0.5946, over 5173.00 frames. ], tot_loss[loss=3.38, NarTop10Accuracy=0.6422, over 5875.32 frames. ], batch size: 7, lr: 2.35e-03 +2024-08-06 14:59:50,503 INFO [trainer.py:765] (7/8) Epoch 36, batch 2500, train_loss[loss=3.159, NarTop10Accuracy=0.6777, over 5173.00 frames. ], tot_loss[loss=3.344, NarTop10Accuracy=0.6491, over 5527.74 frames. ], batch size: 6, lr: 2.34e-03 +2024-08-06 15:00:11,098 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:01:14,218 INFO [trainer.py:765] (7/8) Epoch 37, batch 100, train_loss[loss=3.296, NarTop10Accuracy=0.6628, over 6982.00 frames. ], tot_loss[loss=3.28, NarTop10Accuracy=0.6639, over 2372.07 frames. 
], batch size: 30, lr: 2.31e-03 +2024-08-06 15:01:44,097 INFO [trainer.py:765] (7/8) Epoch 37, batch 200, train_loss[loss=3.088, NarTop10Accuracy=0.7062, over 6838.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6623, over 3854.52 frames. ], batch size: 17, lr: 2.31e-03 +2024-08-06 15:02:17,382 INFO [trainer.py:765] (7/8) Epoch 37, batch 300, train_loss[loss=3.301, NarTop10Accuracy=0.6632, over 7390.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.663, over 4661.38 frames. ], batch size: 23, lr: 2.31e-03 +2024-08-06 15:02:48,346 INFO [trainer.py:765] (7/8) Epoch 37, batch 400, train_loss[loss=3.37, NarTop10Accuracy=0.6425, over 5699.00 frames. ], tot_loss[loss=3.288, NarTop10Accuracy=0.6608, over 5119.59 frames. ], batch size: 8, lr: 2.31e-03 +2024-08-06 15:03:26,570 INFO [trainer.py:765] (7/8) Epoch 37, batch 500, train_loss[loss=3.091, NarTop10Accuracy=0.7036, over 6080.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6597, over 5423.38 frames. ], batch size: 11, lr: 2.30e-03 +2024-08-06 15:03:58,032 INFO [trainer.py:765] (7/8) Epoch 37, batch 600, train_loss[loss=3.061, NarTop10Accuracy=0.7152, over 5698.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6577, over 5691.49 frames. ], batch size: 9, lr: 2.30e-03 +2024-08-06 15:04:30,247 INFO [trainer.py:765] (7/8) Epoch 37, batch 700, train_loss[loss=3.214, NarTop10Accuracy=0.685, over 5020.00 frames. ], tot_loss[loss=3.317, NarTop10Accuracy=0.6555, over 5744.14 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:12,163 INFO [trainer.py:765] (7/8) Epoch 37, batch 800, train_loss[loss=3.353, NarTop10Accuracy=0.6399, over 4921.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6558, over 5805.81 frames. ], batch size: 6, lr: 2.30e-03 +2024-08-06 15:05:40,606 INFO [trainer.py:765] (7/8) Epoch 37, batch 900, train_loss[loss=3.226, NarTop10Accuracy=0.6841, over 6290.00 frames. ], tot_loss[loss=3.321, NarTop10Accuracy=0.6547, over 5821.99 frames. ], batch size: 13, lr: 2.30e-03 +2024-08-06 15:06:15,608 INFO [trainer.py:765] (7/8) Epoch 37, batch 1000, train_loss[loss=2.956, NarTop10Accuracy=0.7213, over 6534.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6528, over 5926.88 frames. ], batch size: 14, lr: 2.30e-03 +2024-08-06 15:06:42,491 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:06:53,168 INFO [trainer.py:811] (7/8) Epoch 37, validation: loss=3.234, NarTop10Accuracy=0.6744, over 1907754.00 frames. +2024-08-06 15:06:53,169 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 15:06:53,809 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.068e+02 2.238e+02 2.409e+02 6.392e+02, threshold=4.475e+02, percent-clipped=0.1 +2024-08-06 15:07:01,306 INFO [trainer.py:765] (7/8) Epoch 37, batch 1100, train_loss[loss=3.667, NarTop10Accuracy=0.5954, over 6873.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6524, over 5953.29 frames. ], batch size: 17, lr: 2.30e-03 +2024-08-06 15:07:32,718 INFO [trainer.py:765] (7/8) Epoch 37, batch 1200, train_loss[loss=3.271, NarTop10Accuracy=0.6701, over 7585.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6538, over 5959.20 frames. ], batch size: 31, lr: 2.30e-03 +2024-08-06 15:08:04,777 INFO [trainer.py:765] (7/8) Epoch 37, batch 1300, train_loss[loss=3.425, NarTop10Accuracy=0.6245, over 5020.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6532, over 6012.25 frames. 
], batch size: 6, lr: 2.29e-03 +2024-08-06 15:08:47,880 INFO [trainer.py:765] (7/8) Epoch 37, batch 1400, train_loss[loss=3.194, NarTop10Accuracy=0.6912, over 6065.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.652, over 6034.54 frames. ], batch size: 11, lr: 2.29e-03 +2024-08-06 15:09:16,180 INFO [trainer.py:765] (7/8) Epoch 37, batch 1500, train_loss[loss=3.643, NarTop10Accuracy=0.5843, over 5783.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6498, over 5952.58 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:09:44,191 INFO [trainer.py:765] (7/8) Epoch 37, batch 1600, train_loss[loss=3.533, NarTop10Accuracy=0.6197, over 7131.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5927.42 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:10:11,082 INFO [trainer.py:765] (7/8) Epoch 37, batch 1700, train_loss[loss=3.259, NarTop10Accuracy=0.6722, over 6319.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6502, over 5935.19 frames. ], batch size: 13, lr: 2.29e-03 +2024-08-06 15:10:37,752 INFO [trainer.py:765] (7/8) Epoch 37, batch 1800, train_loss[loss=3.407, NarTop10Accuracy=0.6286, over 7166.00 frames. ], tot_loss[loss=3.34, NarTop10Accuracy=0.6509, over 6013.65 frames. ], batch size: 22, lr: 2.29e-03 +2024-08-06 15:11:04,270 INFO [trainer.py:765] (7/8) Epoch 37, batch 1900, train_loss[loss=3.58, NarTop10Accuracy=0.6078, over 6405.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6491, over 6048.41 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:29,941 INFO [trainer.py:765] (7/8) Epoch 37, batch 2000, train_loss[loss=3.596, NarTop10Accuracy=0.6061, over 5873.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6485, over 6018.12 frames. ], batch size: 49, lr: 2.29e-03 +2024-08-06 15:11:58,797 INFO [trainer.py:765] (7/8) Epoch 37, batch 2100, train_loss[loss=3.414, NarTop10Accuracy=0.6378, over 3857.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6486, over 5981.29 frames. ], batch size: 4, lr: 2.29e-03 +2024-08-06 15:12:24,311 INFO [trainer.py:765] (7/8) Epoch 37, batch 2200, train_loss[loss=3.409, NarTop10Accuracy=0.639, over 7237.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6497, over 6027.35 frames. ], batch size: 31, lr: 2.28e-03 +2024-08-06 15:12:49,787 INFO [trainer.py:765] (7/8) Epoch 37, batch 2300, train_loss[loss=3.296, NarTop10Accuracy=0.6599, over 5824.00 frames. ], tot_loss[loss=3.348, NarTop10Accuracy=0.6485, over 6038.50 frames. ], batch size: 9, lr: 2.28e-03 +2024-08-06 15:13:14,526 INFO [trainer.py:765] (7/8) Epoch 37, batch 2400, train_loss[loss=2.79, NarTop10Accuracy=0.7555, over 5072.00 frames. ], tot_loss[loss=3.358, NarTop10Accuracy=0.6466, over 5851.79 frames. ], batch size: 7, lr: 2.28e-03 +2024-08-06 15:13:37,942 INFO [trainer.py:765] (7/8) Epoch 37, batch 2500, train_loss[loss=3.249, NarTop10Accuracy=0.6708, over 5141.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6522, over 5515.11 frames. ], batch size: 6, lr: 2.28e-03 +2024-08-06 15:13:59,341 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:14:50,846 INFO [trainer.py:765] (7/8) Epoch 38, batch 100, train_loss[loss=3.58, NarTop10Accuracy=0.5957, over 7109.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6553, over 2391.24 frames. ], batch size: 30, lr: 2.25e-03 +2024-08-06 15:15:27,288 INFO [trainer.py:765] (7/8) Epoch 38, batch 200, train_loss[loss=3.205, NarTop10Accuracy=0.6717, over 6772.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6626, over 3884.26 frames. 
], batch size: 17, lr: 2.25e-03 +2024-08-06 15:16:01,280 INFO [trainer.py:765] (7/8) Epoch 38, batch 300, train_loss[loss=3.378, NarTop10Accuracy=0.6511, over 7133.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6654, over 4674.49 frames. ], batch size: 22, lr: 2.25e-03 +2024-08-06 15:16:32,594 INFO [trainer.py:765] (7/8) Epoch 38, batch 400, train_loss[loss=2.785, NarTop10Accuracy=0.7388, over 5034.00 frames. ], tot_loss[loss=3.276, NarTop10Accuracy=0.664, over 5118.07 frames. ], batch size: 7, lr: 2.24e-03 +2024-08-06 15:17:04,257 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:17:14,104 INFO [trainer.py:811] (7/8) Epoch 38, validation: loss=3.229, NarTop10Accuracy=0.6755, over 1907754.00 frames. +2024-08-06 15:17:14,105 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 15:17:14,630 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.062e+02 2.214e+02 2.396e+02 3.845e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-08-06 15:17:16,479 INFO [trainer.py:765] (7/8) Epoch 38, batch 500, train_loss[loss=3.292, NarTop10Accuracy=0.6609, over 6176.00 frames. ], tot_loss[loss=3.267, NarTop10Accuracy=0.6657, over 5402.97 frames. ], batch size: 11, lr: 2.24e-03 +2024-08-06 15:17:53,875 INFO [trainer.py:765] (7/8) Epoch 38, batch 600, train_loss[loss=3.221, NarTop10Accuracy=0.6744, over 5849.00 frames. ], tot_loss[loss=3.284, NarTop10Accuracy=0.6619, over 5685.54 frames. ], batch size: 9, lr: 2.24e-03 +2024-08-06 15:18:26,466 INFO [trainer.py:765] (7/8) Epoch 38, batch 700, train_loss[loss=3.202, NarTop10Accuracy=0.6756, over 4975.00 frames. ], tot_loss[loss=3.302, NarTop10Accuracy=0.6583, over 5744.02 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:01,129 INFO [trainer.py:765] (7/8) Epoch 38, batch 800, train_loss[loss=2.902, NarTop10Accuracy=0.7208, over 5075.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6556, over 5809.76 frames. ], batch size: 6, lr: 2.24e-03 +2024-08-06 15:19:36,540 INFO [trainer.py:765] (7/8) Epoch 38, batch 900, train_loss[loss=3.544, NarTop10Accuracy=0.6006, over 6636.00 frames. ], tot_loss[loss=3.325, NarTop10Accuracy=0.654, over 5820.72 frames. ], batch size: 14, lr: 2.24e-03 +2024-08-06 15:20:09,134 INFO [trainer.py:765] (7/8) Epoch 38, batch 1000, train_loss[loss=3.397, NarTop10Accuracy=0.6409, over 6251.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6533, over 5923.23 frames. ], batch size: 13, lr: 2.24e-03 +2024-08-06 15:20:47,346 INFO [trainer.py:765] (7/8) Epoch 38, batch 1100, train_loss[loss=3.456, NarTop10Accuracy=0.619, over 6791.00 frames. ], tot_loss[loss=3.335, NarTop10Accuracy=0.6516, over 5961.20 frames. ], batch size: 17, lr: 2.24e-03 +2024-08-06 15:21:25,594 INFO [trainer.py:765] (7/8) Epoch 38, batch 1200, train_loss[loss=3.469, NarTop10Accuracy=0.6268, over 7111.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6495, over 5961.10 frames. ], batch size: 30, lr: 2.23e-03 +2024-08-06 15:21:57,556 INFO [trainer.py:765] (7/8) Epoch 38, batch 1300, train_loss[loss=3.442, NarTop10Accuracy=0.6242, over 4969.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6536, over 6017.16 frames. ], batch size: 6, lr: 2.23e-03 +2024-08-06 15:22:29,468 INFO [trainer.py:765] (7/8) Epoch 38, batch 1400, train_loss[loss=3.163, NarTop10Accuracy=0.681, over 6067.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.653, over 6033.15 frames. 
], batch size: 11, lr: 2.23e-03 +2024-08-06 15:23:06,615 INFO [trainer.py:765] (7/8) Epoch 38, batch 1500, train_loss[loss=3.297, NarTop10Accuracy=0.6715, over 5755.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.65, over 5968.55 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:23:34,640 INFO [trainer.py:765] (7/8) Epoch 38, batch 1600, train_loss[loss=3.277, NarTop10Accuracy=0.6613, over 7275.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6491, over 5944.12 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:01,433 INFO [trainer.py:765] (7/8) Epoch 38, batch 1700, train_loss[loss=3.151, NarTop10Accuracy=0.6805, over 6199.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6497, over 5939.17 frames. ], batch size: 13, lr: 2.23e-03 +2024-08-06 15:24:28,065 INFO [trainer.py:765] (7/8) Epoch 38, batch 1800, train_loss[loss=3.25, NarTop10Accuracy=0.6652, over 7242.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6496, over 6011.28 frames. ], batch size: 22, lr: 2.23e-03 +2024-08-06 15:24:54,672 INFO [trainer.py:765] (7/8) Epoch 38, batch 1900, train_loss[loss=3.405, NarTop10Accuracy=0.6377, over 5829.00 frames. ], tot_loss[loss=3.356, NarTop10Accuracy=0.6472, over 6045.94 frames. ], batch size: 48, lr: 2.23e-03 +2024-08-06 15:25:20,410 INFO [trainer.py:765] (7/8) Epoch 38, batch 2000, train_loss[loss=3.478, NarTop10Accuracy=0.6195, over 6142.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6483, over 6010.85 frames. ], batch size: 49, lr: 2.23e-03 +2024-08-06 15:25:45,856 INFO [trainer.py:765] (7/8) Epoch 38, batch 2100, train_loss[loss=3.103, NarTop10Accuracy=0.7036, over 4850.00 frames. ], tot_loss[loss=3.338, NarTop10Accuracy=0.6501, over 5998.11 frames. ], batch size: 5, lr: 2.22e-03 +2024-08-06 15:26:11,316 INFO [trainer.py:765] (7/8) Epoch 38, batch 2200, train_loss[loss=3.383, NarTop10Accuracy=0.6418, over 7335.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.65, over 6035.56 frames. ], batch size: 31, lr: 2.22e-03 +2024-08-06 15:26:36,708 INFO [trainer.py:765] (7/8) Epoch 38, batch 2300, train_loss[loss=3.219, NarTop10Accuracy=0.686, over 5846.00 frames. ], tot_loss[loss=3.35, NarTop10Accuracy=0.648, over 6042.04 frames. ], batch size: 9, lr: 2.22e-03 +2024-08-06 15:27:01,479 INFO [trainer.py:765] (7/8) Epoch 38, batch 2400, train_loss[loss=3.071, NarTop10Accuracy=0.6914, over 5148.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6462, over 5842.23 frames. ], batch size: 7, lr: 2.22e-03 +2024-08-06 15:27:23,144 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:27:33,590 INFO [trainer.py:811] (7/8) Epoch 38, validation: loss=3.213, NarTop10Accuracy=0.6782, over 1907754.00 frames. +2024-08-06 15:27:33,590 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 15:27:34,076 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.098e+02 2.247e+02 2.437e+02 3.550e+02, threshold=4.494e+02, percent-clipped=0.0 +2024-08-06 15:27:35,515 INFO [trainer.py:765] (7/8) Epoch 38, batch 2500, train_loss[loss=3.241, NarTop10Accuracy=0.6652, over 5155.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6529, over 5538.56 frames. ], batch size: 6, lr: 2.22e-03 +2024-08-06 15:27:56,757 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:28:51,227 INFO [trainer.py:765] (7/8) Epoch 39, batch 100, train_loss[loss=3.306, NarTop10Accuracy=0.6601, over 7421.00 frames. ], tot_loss[loss=3.257, NarTop10Accuracy=0.6677, over 2379.56 frames. 
], batch size: 32, lr: 2.19e-03 +2024-08-06 15:29:28,051 INFO [trainer.py:765] (7/8) Epoch 39, batch 200, train_loss[loss=3.518, NarTop10Accuracy=0.6121, over 6980.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.664, over 3871.61 frames. ], batch size: 17, lr: 2.19e-03 +2024-08-06 15:30:02,018 INFO [trainer.py:765] (7/8) Epoch 39, batch 300, train_loss[loss=3.265, NarTop10Accuracy=0.6669, over 7210.00 frames. ], tot_loss[loss=3.29, NarTop10Accuracy=0.6615, over 4671.53 frames. ], batch size: 22, lr: 2.19e-03 +2024-08-06 15:30:32,992 INFO [trainer.py:765] (7/8) Epoch 39, batch 400, train_loss[loss=3.064, NarTop10Accuracy=0.7055, over 5127.00 frames. ], tot_loss[loss=3.296, NarTop10Accuracy=0.6597, over 5121.79 frames. ], batch size: 7, lr: 2.19e-03 +2024-08-06 15:31:03,569 INFO [trainer.py:765] (7/8) Epoch 39, batch 500, train_loss[loss=3.1, NarTop10Accuracy=0.694, over 6132.00 frames. ], tot_loss[loss=3.294, NarTop10Accuracy=0.6598, over 5392.34 frames. ], batch size: 11, lr: 2.18e-03 +2024-08-06 15:31:40,850 INFO [trainer.py:765] (7/8) Epoch 39, batch 600, train_loss[loss=3.23, NarTop10Accuracy=0.6823, over 5733.00 frames. ], tot_loss[loss=3.299, NarTop10Accuracy=0.6589, over 5662.72 frames. ], batch size: 9, lr: 2.18e-03 +2024-08-06 15:32:14,451 INFO [trainer.py:765] (7/8) Epoch 39, batch 700, train_loss[loss=2.882, NarTop10Accuracy=0.7405, over 5023.00 frames. ], tot_loss[loss=3.309, NarTop10Accuracy=0.6573, over 5740.86 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:32:44,165 INFO [trainer.py:765] (7/8) Epoch 39, batch 800, train_loss[loss=3.135, NarTop10Accuracy=0.6992, over 5200.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.6564, over 5798.96 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:33:21,117 INFO [trainer.py:765] (7/8) Epoch 39, batch 900, train_loss[loss=3.148, NarTop10Accuracy=0.6795, over 6326.00 frames. ], tot_loss[loss=3.315, NarTop10Accuracy=0.656, over 5806.64 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:02,655 INFO [trainer.py:765] (7/8) Epoch 39, batch 1000, train_loss[loss=3.136, NarTop10Accuracy=0.6956, over 6304.00 frames. ], tot_loss[loss=3.313, NarTop10Accuracy=0.6563, over 5905.76 frames. ], batch size: 13, lr: 2.18e-03 +2024-08-06 15:34:33,095 INFO [trainer.py:765] (7/8) Epoch 39, batch 1100, train_loss[loss=3.207, NarTop10Accuracy=0.6829, over 6838.00 frames. ], tot_loss[loss=3.322, NarTop10Accuracy=0.6543, over 5933.47 frames. ], batch size: 17, lr: 2.18e-03 +2024-08-06 15:35:09,244 INFO [trainer.py:765] (7/8) Epoch 39, batch 1200, train_loss[loss=3.143, NarTop10Accuracy=0.6935, over 7205.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.655, over 5922.47 frames. ], batch size: 30, lr: 2.18e-03 +2024-08-06 15:35:46,813 INFO [trainer.py:765] (7/8) Epoch 39, batch 1300, train_loss[loss=3.504, NarTop10Accuracy=0.6129, over 5083.00 frames. ], tot_loss[loss=3.324, NarTop10Accuracy=0.654, over 6005.36 frames. ], batch size: 6, lr: 2.18e-03 +2024-08-06 15:36:18,850 INFO [trainer.py:765] (7/8) Epoch 39, batch 1400, train_loss[loss=3.202, NarTop10Accuracy=0.6707, over 6166.00 frames. ], tot_loss[loss=3.328, NarTop10Accuracy=0.6533, over 6000.89 frames. ], batch size: 11, lr: 2.17e-03 +2024-08-06 15:36:47,214 INFO [trainer.py:765] (7/8) Epoch 39, batch 1500, train_loss[loss=3.437, NarTop10Accuracy=0.6322, over 5615.00 frames. ], tot_loss[loss=3.336, NarTop10Accuracy=0.6511, over 5963.57 frames. 
], batch size: 48, lr: 2.17e-03 +2024-08-06 15:37:15,216 INFO [trainer.py:765] (7/8) Epoch 39, batch 1600, train_loss[loss=3.317, NarTop10Accuracy=0.653, over 7155.00 frames. ], tot_loss[loss=3.343, NarTop10Accuracy=0.6494, over 5957.66 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:37:41,883 INFO [trainer.py:765] (7/8) Epoch 39, batch 1700, train_loss[loss=3.293, NarTop10Accuracy=0.6579, over 6687.00 frames. ], tot_loss[loss=3.341, NarTop10Accuracy=0.6499, over 5920.80 frames. ], batch size: 14, lr: 2.17e-03 +2024-08-06 15:38:08,509 INFO [trainer.py:765] (7/8) Epoch 39, batch 1800, train_loss[loss=3.243, NarTop10Accuracy=0.6663, over 7032.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6498, over 6008.12 frames. ], batch size: 22, lr: 2.17e-03 +2024-08-06 15:38:35,253 INFO [trainer.py:765] (7/8) Epoch 39, batch 1900, train_loss[loss=3.41, NarTop10Accuracy=0.643, over 6999.00 frames. ], tot_loss[loss=3.359, NarTop10Accuracy=0.6469, over 6058.15 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:38:37,991 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:38:48,262 INFO [trainer.py:811] (7/8) Epoch 39, validation: loss=3.177, NarTop10Accuracy=0.6866, over 1907754.00 frames. +2024-08-06 15:38:48,263 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 15:38:48,768 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.106e+02 2.266e+02 2.462e+02 4.274e+02, threshold=4.532e+02, percent-clipped=0.0 +2024-08-06 15:39:11,227 INFO [trainer.py:765] (7/8) Epoch 39, batch 2000, train_loss[loss=3.395, NarTop10Accuracy=0.6405, over 6557.00 frames. ], tot_loss[loss=3.345, NarTop10Accuracy=0.6494, over 6021.88 frames. ], batch size: 49, lr: 2.17e-03 +2024-08-06 15:39:36,692 INFO [trainer.py:765] (7/8) Epoch 39, batch 2100, train_loss[loss=3.553, NarTop10Accuracy=0.6044, over 3845.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6505, over 5994.96 frames. ], batch size: 4, lr: 2.17e-03 +2024-08-06 15:40:02,086 INFO [trainer.py:765] (7/8) Epoch 39, batch 2200, train_loss[loss=3.473, NarTop10Accuracy=0.6269, over 7484.00 frames. ], tot_loss[loss=3.337, NarTop10Accuracy=0.6507, over 6036.15 frames. ], batch size: 31, lr: 2.17e-03 +2024-08-06 15:40:27,496 INFO [trainer.py:765] (7/8) Epoch 39, batch 2300, train_loss[loss=3.242, NarTop10Accuracy=0.6609, over 5788.00 frames. ], tot_loss[loss=3.351, NarTop10Accuracy=0.6481, over 6071.16 frames. ], batch size: 9, lr: 2.16e-03 +2024-08-06 15:40:52,331 INFO [trainer.py:765] (7/8) Epoch 39, batch 2400, train_loss[loss=3.572, NarTop10Accuracy=0.6063, over 5184.00 frames. ], tot_loss[loss=3.353, NarTop10Accuracy=0.6477, over 5887.69 frames. ], batch size: 7, lr: 2.16e-03 +2024-08-06 15:41:15,695 INFO [trainer.py:765] (7/8) Epoch 39, batch 2500, train_loss[loss=3.441, NarTop10Accuracy=0.6133, over 5022.00 frames. ], tot_loss[loss=3.327, NarTop10Accuracy=0.6522, over 5535.61 frames. ], batch size: 6, lr: 2.16e-03 +2024-08-06 15:41:36,826 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:42:35,254 INFO [trainer.py:765] (7/8) Epoch 40, batch 100, train_loss[loss=3.482, NarTop10Accuracy=0.6189, over 7335.00 frames. ], tot_loss[loss=3.306, NarTop10Accuracy=0.6583, over 2378.79 frames. ], batch size: 31, lr: 2.13e-03 +2024-08-06 15:43:09,645 INFO [trainer.py:765] (7/8) Epoch 40, batch 200, train_loss[loss=3.485, NarTop10Accuracy=0.6311, over 6829.00 frames. ], tot_loss[loss=3.283, NarTop10Accuracy=0.6635, over 3875.61 frames. 
], batch size: 17, lr: 2.13e-03 +2024-08-06 15:43:43,738 INFO [trainer.py:765] (7/8) Epoch 40, batch 300, train_loss[loss=3.343, NarTop10Accuracy=0.6502, over 7210.00 frames. ], tot_loss[loss=3.277, NarTop10Accuracy=0.6642, over 4685.08 frames. ], batch size: 22, lr: 2.13e-03 +2024-08-06 15:44:18,202 INFO [trainer.py:765] (7/8) Epoch 40, batch 400, train_loss[loss=2.867, NarTop10Accuracy=0.7472, over 5070.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6649, over 5130.84 frames. ], batch size: 7, lr: 2.13e-03 +2024-08-06 15:44:50,257 INFO [trainer.py:765] (7/8) Epoch 40, batch 500, train_loss[loss=3.136, NarTop10Accuracy=0.695, over 6236.00 frames. ], tot_loss[loss=3.269, NarTop10Accuracy=0.6646, over 5410.24 frames. ], batch size: 11, lr: 2.13e-03 +2024-08-06 15:45:25,431 INFO [trainer.py:765] (7/8) Epoch 40, batch 600, train_loss[loss=3.307, NarTop10Accuracy=0.649, over 5754.00 frames. ], tot_loss[loss=3.289, NarTop10Accuracy=0.6605, over 5676.55 frames. ], batch size: 9, lr: 2.13e-03 +2024-08-06 15:45:58,647 INFO [trainer.py:765] (7/8) Epoch 40, batch 700, train_loss[loss=3.45, NarTop10Accuracy=0.6211, over 5061.00 frames. ], tot_loss[loss=3.304, NarTop10Accuracy=0.6582, over 5734.61 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:46:34,887 INFO [trainer.py:765] (7/8) Epoch 40, batch 800, train_loss[loss=3.463, NarTop10Accuracy=0.6275, over 5127.00 frames. ], tot_loss[loss=3.305, NarTop10Accuracy=0.6574, over 5785.63 frames. ], batch size: 6, lr: 2.13e-03 +2024-08-06 15:47:07,290 INFO [trainer.py:765] (7/8) Epoch 40, batch 900, train_loss[loss=3.246, NarTop10Accuracy=0.676, over 6215.00 frames. ], tot_loss[loss=3.3, NarTop10Accuracy=0.6584, over 5797.31 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:47:43,510 INFO [trainer.py:765] (7/8) Epoch 40, batch 1000, train_loss[loss=3.421, NarTop10Accuracy=0.6317, over 6642.00 frames. ], tot_loss[loss=3.311, NarTop10Accuracy=0.6558, over 5919.33 frames. ], batch size: 14, lr: 2.12e-03 +2024-08-06 15:48:18,710 INFO [trainer.py:765] (7/8) Epoch 40, batch 1100, train_loss[loss=3.38, NarTop10Accuracy=0.6438, over 6837.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6539, over 5949.32 frames. ], batch size: 17, lr: 2.12e-03 +2024-08-06 15:48:52,094 INFO [trainer.py:765] (7/8) Epoch 40, batch 1200, train_loss[loss=3.456, NarTop10Accuracy=0.6285, over 7445.00 frames. ], tot_loss[loss=3.32, NarTop10Accuracy=0.6538, over 5953.02 frames. ], batch size: 32, lr: 2.12e-03 +2024-08-06 15:49:29,783 INFO [trainer.py:765] (7/8) Epoch 40, batch 1300, train_loss[loss=3.349, NarTop10Accuracy=0.6461, over 5103.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6532, over 6019.50 frames. ], batch size: 6, lr: 2.12e-03 +2024-08-06 15:49:38,246 INFO [trainer.py:803] (7/8) Computing validation loss +2024-08-06 15:49:48,934 INFO [trainer.py:811] (7/8) Epoch 40, validation: loss=3.171, NarTop10Accuracy=0.6871, over 1907754.00 frames. +2024-08-06 15:49:48,935 INFO [trainer.py:814] (7/8) Maximum memory allocated so far is 30573MB +2024-08-06 15:49:49,615 INFO [optim.py:386] (7/8) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.095e+02 2.264e+02 2.441e+02 4.960e+02, threshold=4.528e+02, percent-clipped=0.1 +2024-08-06 15:50:12,460 INFO [trainer.py:765] (7/8) Epoch 40, batch 1400, train_loss[loss=3.27, NarTop10Accuracy=0.6602, over 6130.00 frames. ], tot_loss[loss=3.332, NarTop10Accuracy=0.6518, over 6056.57 frames. 
], batch size: 11, lr: 2.12e-03 +2024-08-06 15:50:45,930 INFO [trainer.py:765] (7/8) Epoch 40, batch 1500, train_loss[loss=3.533, NarTop10Accuracy=0.6139, over 6301.00 frames. ], tot_loss[loss=3.329, NarTop10Accuracy=0.6528, over 5991.85 frames. ], batch size: 52, lr: 2.12e-03 +2024-08-06 15:51:13,820 INFO [trainer.py:765] (7/8) Epoch 40, batch 1600, train_loss[loss=3.169, NarTop10Accuracy=0.6815, over 7271.00 frames. ], tot_loss[loss=3.318, NarTop10Accuracy=0.6551, over 5963.01 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:51:40,570 INFO [trainer.py:765] (7/8) Epoch 40, batch 1700, train_loss[loss=3.335, NarTop10Accuracy=0.6557, over 6233.00 frames. ], tot_loss[loss=3.323, NarTop10Accuracy=0.6537, over 5947.03 frames. ], batch size: 13, lr: 2.12e-03 +2024-08-06 15:52:07,235 INFO [trainer.py:765] (7/8) Epoch 40, batch 1800, train_loss[loss=3.442, NarTop10Accuracy=0.6305, over 7128.00 frames. ], tot_loss[loss=3.326, NarTop10Accuracy=0.6529, over 5989.31 frames. ], batch size: 22, lr: 2.12e-03 +2024-08-06 15:52:33,820 INFO [trainer.py:765] (7/8) Epoch 40, batch 1900, train_loss[loss=3.633, NarTop10Accuracy=0.5902, over 5652.00 frames. ], tot_loss[loss=3.334, NarTop10Accuracy=0.6517, over 6039.93 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:52:59,510 INFO [trainer.py:765] (7/8) Epoch 40, batch 2000, train_loss[loss=3.408, NarTop10Accuracy=0.6351, over 6105.00 frames. ], tot_loss[loss=3.339, NarTop10Accuracy=0.6507, over 6019.64 frames. ], batch size: 49, lr: 2.11e-03 +2024-08-06 15:53:24,913 INFO [trainer.py:765] (7/8) Epoch 40, batch 2100, train_loss[loss=3.086, NarTop10Accuracy=0.6788, over 3939.00 frames. ], tot_loss[loss=3.331, NarTop10Accuracy=0.6515, over 6003.38 frames. ], batch size: 4, lr: 2.11e-03 +2024-08-06 15:53:50,418 INFO [trainer.py:765] (7/8) Epoch 40, batch 2200, train_loss[loss=3.388, NarTop10Accuracy=0.6445, over 7189.00 frames. ], tot_loss[loss=3.33, NarTop10Accuracy=0.6518, over 6044.60 frames. ], batch size: 30, lr: 2.11e-03 +2024-08-06 15:54:15,885 INFO [trainer.py:765] (7/8) Epoch 40, batch 2300, train_loss[loss=3.269, NarTop10Accuracy=0.6651, over 5754.00 frames. ], tot_loss[loss=3.342, NarTop10Accuracy=0.6499, over 6059.94 frames. ], batch size: 9, lr: 2.11e-03 +2024-08-06 15:54:43,786 INFO [trainer.py:765] (7/8) Epoch 40, batch 2400, train_loss[loss=3.515, NarTop10Accuracy=0.6033, over 5026.00 frames. ], tot_loss[loss=3.347, NarTop10Accuracy=0.6488, over 5867.61 frames. ], batch size: 7, lr: 2.11e-03 +2024-08-06 15:55:07,364 INFO [trainer.py:765] (7/8) Epoch 40, batch 2500, train_loss[loss=3.284, NarTop10Accuracy=0.6491, over 4932.00 frames. ], tot_loss[loss=3.316, NarTop10Accuracy=0.6548, over 5533.62 frames. ], batch size: 6, lr: 2.11e-03 +2024-08-06 15:55:28,566 INFO [trainer.py:650] (7/8) Reaches end of dataloader. +2024-08-06 15:55:28,569 INFO [trainer.py:1069] (7/8) Done! 
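Note on the recurring optimizer entries above of the form "Clipping_scale=2.0, grad-norm quartiles ... threshold=..., percent-clipped=...": the logged threshold is consistently the clipping scale times the median gradient norm (for example 2.0 * 2.203e+02 = 4.406e+02). The short Python sketch below illustrates that bookkeeping only; recent_norms is a hypothetical stand-in for whatever window of gradient norms the optimizer actually tracks, and this is not the icefall optim.py implementation.

import torch

def summarize_grad_norms(recent_norms: torch.Tensor, clipping_scale: float = 2.0):
    # Illustrative bookkeeping for entries like
    # "Clipping_scale=2.0, grad-norm quartiles ... threshold=..., percent-clipped=...".
    # recent_norms: hypothetical 1-D float tensor of recorded gradient norms.
    quartiles = torch.quantile(recent_norms, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * quartiles[2]  # e.g. 2.0 * 2.203e+02 = 4.406e+02
    percent_clipped = 100.0 * (recent_norms > threshold).float().mean()
    return quartiles, threshold, percent_clipped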
diff --git a/libritts/model.txt b/libritts/model.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d81a33dead3a1d7b72d723a8d8d922be3059a61
--- /dev/null
+++ b/libritts/model.txt
@@ -0,0 +1,95 @@
+VALLE(
+  (ar_text_embedding): TokenEmbedding(
+    (dropout): Dropout(p=0.0, inplace=False)
+    (word_embeddings): Embedding(512, 1024)
+  )
+  (nar_text_embedding): TokenEmbedding(
+    (dropout): Dropout(p=0.0, inplace=False)
+    (word_embeddings): Embedding(512, 1024)
+  )
+  (ar_audio_embedding): TokenEmbedding(
+    (dropout): Dropout(p=0.0, inplace=False)
+    (word_embeddings): Embedding(1025, 1024)
+  )
+  (ar_text_prenet): Identity()
+  (ar_audio_prenet): Identity()
+  (ar_text_position): SinePositionalEmbedding(
+    (dropout): Dropout(p=0.1, inplace=False)
+  )
+  (ar_audio_position): SinePositionalEmbedding(
+    (dropout): Dropout(p=0.1, inplace=False)
+  )
+  (ar_decoder): TransformerEncoder(
+    (layers): ModuleList(
+      (0-11): 12 x TransformerEncoderLayer(
+        (self_attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
+        )
+        (linear1): Linear(in_features=1024, out_features=4096, bias=True)
+        (dropout): Dropout(p=0.1, inplace=False)
+        (linear2): Linear(in_features=4096, out_features=1024, bias=True)
+        (dropout1): Dropout(p=0.1, inplace=False)
+        (dropout2): Dropout(p=0.1, inplace=False)
+        (norm1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+        (norm2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+      )
+    )
+    (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+  )
+  (ar_predict_layer): Linear(in_features=1024, out_features=1025, bias=False)
+  (ar_accuracy_metric): MulticlassAccuracy()
+  (nar_audio_embeddings): ModuleList(
+    (0): TokenEmbedding(
+      (dropout): Dropout(p=0.0, inplace=False)
+      (word_embeddings): Embedding(1025, 1024)
+    )
+    (1-7): 7 x TokenEmbedding(
+      (dropout): Dropout(p=0.0, inplace=False)
+      (word_embeddings): Embedding(1024, 1024)
+    )
+  )
+  (nar_text_prenet): Identity()
+  (nar_audio_prenet): Identity()
+  (nar_text_position): SinePositionalEmbedding(
+    (dropout): Dropout(p=0.0, inplace=False)
+  )
+  (nar_audio_position): SinePositionalEmbedding(
+    (dropout): Dropout(p=0.1, inplace=False)
+  )
+  (nar_decoder): TransformerEncoder(
+    (layers): ModuleList(
+      (0-11): 12 x TransformerEncoderLayer(
+        (self_attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
+        )
+        (linear1): Linear(in_features=1024, out_features=4096, bias=True)
+        (dropout): Dropout(p=0.1, inplace=False)
+        (linear2): Linear(in_features=4096, out_features=1024, bias=True)
+        (dropout1): Dropout(p=0.1, inplace=False)
+        (dropout2): Dropout(p=0.1, inplace=False)
+        (norm1): AdaptiveLayerNorm(
+          (project_layer): Linear(in_features=1024, out_features=2048, bias=True)
+          (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+        )
+        (norm2): AdaptiveLayerNorm(
+          (project_layer): Linear(in_features=1024, out_features=2048, bias=True)
+          (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+        )
+      )
+    )
+    (norm): AdaptiveLayerNorm(
+      (project_layer): Linear(in_features=1024, out_features=2048, bias=True)
+      (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+    )
+  )
+  (nar_predict_layers): ModuleList(
+    (0-6): 7 x Linear(in_features=1024, out_features=1024, bias=False)
+  )
+  (nar_stage_embeddings): ModuleList(
+    (0-6): 7 x TokenEmbedding(
+      (dropout): Dropout(p=0.0, inplace=False)
+      (word_embeddings): Embedding(1, 1024)
+    )
+  )
+  (nar_accuracy_metric): MulticlassAccuracy()
+)
diff --git a/libritts/tensorboard_stage1/events.out.tfevents.1722913306.6865771.39841.0 b/libritts/tensorboard_stage1/events.out.tfevents.1722913306.6865771.39841.0
new file mode 100644
index 0000000000000000000000000000000000000000..5efe5fbb6fa442ec1a5a7a6099a9d9d112ae1a62
--- /dev/null
+++ b/libritts/tensorboard_stage1/events.out.tfevents.1722913306.6865771.39841.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ed7395e3c473e3a9353cf04f0d5f70489910c2bcc92dccd524f52353d59b190
+size 88
diff --git a/libritts/tensorboard_stage1/events.out.tfevents.1722914810.6865771.114807.0 b/libritts/tensorboard_stage1/events.out.tfevents.1722914810.6865771.114807.0
new file mode 100644
index 0000000000000000000000000000000000000000..7e4ae50f9609403c0310edef9b4e5d93863f8788
--- /dev/null
+++ b/libritts/tensorboard_stage1/events.out.tfevents.1722914810.6865771.114807.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f4979b32d721f1c427ef359ffaddbd7ff8d2b56922f621945b36efef39c1cc2
+size 135
diff --git a/libritts/tensorboard_stage1/events.out.tfevents.1722915380.6865771.156929.0 b/libritts/tensorboard_stage1/events.out.tfevents.1722915380.6865771.156929.0
new file mode 100644
index 0000000000000000000000000000000000000000..ad7237c61db7776e17a0242d19b0141825ba013e
--- /dev/null
+++ b/libritts/tensorboard_stage1/events.out.tfevents.1722915380.6865771.156929.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d128b8ddb4995c0b8ac9c8fd420c63c03a08ec98c7d8f96f9aae8a37fb6e5853
+size 135
diff --git a/libritts/tensorboard_stage1/events.out.tfevents.1722915535.6865771.178589.0 b/libritts/tensorboard_stage1/events.out.tfevents.1722915535.6865771.178589.0
new file mode 100644
index 0000000000000000000000000000000000000000..b2085b01a0aadac8cc928a34bc5a10d3c5336f51
--- /dev/null
+++ b/libritts/tensorboard_stage1/events.out.tfevents.1722915535.6865771.178589.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e864718e3ac362d6476eefcd48d807960f3d4783610cfa1561ba08fcafc1bd2e
+size 135
diff --git a/libritts/tensorboard_stage1/events.out.tfevents.1722915580.6865771.194837.0 b/libritts/tensorboard_stage1/events.out.tfevents.1722915580.6865771.194837.0
new file mode 100644
index 0000000000000000000000000000000000000000..95b342bed4876803fd42ed6bbaea1293bceead67
--- /dev/null
+++ b/libritts/tensorboard_stage1/events.out.tfevents.1722915580.6865771.194837.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2067cb10d9d8c08eea63eab7286864d0b0f7fa51ffef818a37dc94b15e8b1bfb
+size 103227
diff --git a/libritts/tensorboard_stage2/events.out.tfevents.1722926323.6865771.773349.0 b/libritts/tensorboard_stage2/events.out.tfevents.1722926323.6865771.773349.0
new file mode 100644
index 0000000000000000000000000000000000000000..2aff6af5c5f0b76fed426605a781b78d90e52ae9
--- /dev/null
+++ b/libritts/tensorboard_stage2/events.out.tfevents.1722926323.6865771.773349.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:598f68226f67d9fc9cf62fe5e3f48de9c8e939e947f33171f391436ea257456e
+size 88
diff --git a/libritts/tensorboard_stage2/events.out.tfevents.1722926501.6865771.775496.0 b/libritts/tensorboard_stage2/events.out.tfevents.1722926501.6865771.775496.0
new file mode 100644
index 0000000000000000000000000000000000000000..e70c3c12b2f814e516006f63ac5719c47d0acaf1
--- /dev/null
+++ b/libritts/tensorboard_stage2/events.out.tfevents.1722926501.6865771.775496.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a6aa54cb9f1ede229d184a31ef53d6d8930f58fc38add56a4aabc3761dc688e
+size 434313
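Note on the model.txt diff above: in the NAR decoder every norm is an AdaptiveLayerNorm whose project_layer is Linear(in_features=1024, out_features=2048) wrapped around a plain LayerNorm((1024,)). A minimal sketch of what a module with that structure typically computes follows, assuming (as in common VALL-E implementations) that a conditioning embedding, such as the NAR stage embedding, is projected and split into a per-channel scale and bias; the forward signature is illustrative, not copied from the repository.

import torch
import torch.nn as nn

class AdaptiveLayerNorm(nn.Module):
    # Sketch matching the printed structure:
    #   (project_layer): Linear(in_features=1024, out_features=2048, bias=True)
    #   (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
    # The conditioning embedding is projected to 2*d_model and split into a
    # per-channel weight and bias applied to the normalized hidden states.
    def __init__(self, d_model: int = 1024):
        super().__init__()
        self.project_layer = nn.Linear(d_model, 2 * d_model)
        self.norm = nn.LayerNorm(d_model, eps=1e-5)
        self.d_model = d_model

    def forward(self, x: torch.Tensor, embedding: torch.Tensor) -> torch.Tensor:
        weight, bias = torch.split(self.project_layer(embedding), self.d_model, dim=-1)
        return weight * self.norm(x) + bias

# Shape check only: batch 2, 10 frames, d_model 1024; the stage embedding
# broadcasts over the time axis.
x = torch.randn(2, 10, 1024)
stage_emb = torch.randn(2, 1, 1024)
print(AdaptiveLayerNorm()(x, stage_emb).shape)  # torch.Size([2, 10, 1024])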